
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.26

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.26: (1090 commits)
  [NET]: Fix and allocate less memory for ->priv'less netdevices
  [IPV6]: Fix dangling references on error in fib6_add().
  [NETLABEL]: Fix NULL deref in netlbl_unlabel_staticlist_gen() if ifindex not found
  [PKT_SCHED]: Fix datalen check in tcf_simp_init().
  [INET]: Uninline the __inet_inherit_port call.
  [INET]: Drop the inet_inherit_port() call.
  SCTP: Initialize partial_bytes_acked to 0, when all of the data is acked.
  [netdrvr] forcedeth: internal simplifications; changelog removal
  phylib: factor out get_phy_id from within get_phy_device
  PHY: add BCM5464 support to broadcom PHY driver
  cxgb3: Fix __must_check warning with dev_dbg.
  tc35815: Statistics cleanup
  natsemi: fix MMIO for PPC 44x platforms
  [TIPC]: Cleanup of TIPC reference table code
  [TIPC]: Optimized initialization of TIPC reference table
  [TIPC]: Remove inlining of reference table locking routines
  e1000: convert uint16_t style integers to u16
  ixgb: convert uint16_t style integers to u16
  sb1000.c: make const arrays static
  sb1000.c: stop inlining largish static functions
  ...
Linus Torvalds, 17 years ago
parent
commit
334d094504
100 changed files with 10356 additions and 8239 deletions
  1. Documentation/DocBook/Makefile (+2, -1)
  2. Documentation/DocBook/mac80211.tmpl (+335, -0)
  3. Documentation/feature-removal-schedule.txt (+0, -37)
  4. Documentation/laptops/acer-wmi.txt (+1, -1)
  5. Documentation/networking/00-INDEX (+0, -2)
  6. Documentation/networking/bcm43xx.txt (+0, -89)
  7. Documentation/networking/wan-router.txt (+0, -621)
  8. MAINTAINERS (+1, -16)
  9. arch/ia64/hp/sim/simeth.c (+1, -1)
  10. arch/powerpc/platforms/82xx/ep8248e.c (+1, -1)
  11. arch/powerpc/platforms/pasemi/gpio_mdio.c (+1, -1)
  12. arch/powerpc/sysdev/fsl_soc.c (+3, -2)
  13. arch/s390/defconfig (+3, -5)
  14. drivers/atm/ambassador.c (+11, -11)
  15. drivers/atm/horizon.c (+12, -12)
  16. drivers/block/aoe/aoenet.c (+1, -1)
  17. drivers/net/3c509.c (+372, -357)
  18. drivers/net/8139too.c (+5, -5)
  19. drivers/net/8390.c (+4, -2)
  20. drivers/net/Kconfig (+8, -88)
  21. drivers/net/Makefile (+2, -2)
  22. drivers/net/appletalk/cops.c (+4, -2)
  23. drivers/net/arcnet/arcnet.c (+3, -2)
  24. drivers/net/arcnet/com20020.c (+4, -3)
  25. drivers/net/at1700.c (+4, -3)
  26. drivers/net/atarilance.c (+4, -3)
  27. drivers/net/atl1/Makefile (+0, -2)
  28. drivers/net/atl1/atl1.h (+0, -286)
  29. drivers/net/atl1/atl1_ethtool.c (+0, -505)
  30. drivers/net/atl1/atl1_hw.c (+0, -720)
  31. drivers/net/atl1/atl1_hw.h (+0, -946)
  32. drivers/net/atl1/atl1_param.c (+0, -203)
  33. drivers/net/atlx/Makefile (+1, -0)
  34. drivers/net/atlx/atl1.c (+1636, -522)
  35. drivers/net/atlx/atl1.h (+796, -0)
  36. drivers/net/atlx/atlx.c (+433, -0)
  37. drivers/net/atlx/atlx.h (+506, -0)
  38. drivers/net/atp.c (+2, -2)
  39. drivers/net/au1000_eth.c (+3, -3)
  40. drivers/net/bfin_mac.c (+1, -1)
  41. drivers/net/bonding/bond_3ad.c (+1, -1)
  42. drivers/net/bonding/bond_alb.c (+1, -1)
  43. drivers/net/bonding/bond_main.c (+6, -8)
  44. drivers/net/cassini.c (+4, -8)
  45. drivers/net/cpmac.c (+2, -3)
  46. drivers/net/cxgb3/cxgb3_main.c (+2, -2)
  47. drivers/net/cxgb3/cxgb3_offload.c (+19, -3)
  48. drivers/net/cxgb3/l2t.c (+1, -1)
  49. drivers/net/defxx.c (+2, -1)
  50. drivers/net/e1000/e1000.h (+56, -57)
  51. drivers/net/e1000/e1000_ethtool.c (+88, -87)
  52. drivers/net/e1000/e1000_hw.c (+770, -771)
  53. drivers/net/e1000/e1000_hw.h (+286, -286)
  54. drivers/net/e1000/e1000_main.c (+147, -158)
  55. drivers/net/e1000/e1000_osdep.h (+0, -7)
  56. drivers/net/e1000e/82571.c (+98, -65)
  57. drivers/net/e1000e/Makefile (+1, -1)
  58. drivers/net/e1000e/defines.h (+58, -51)
  59. drivers/net/e1000e/e1000.h (+19, -18)
  60. drivers/net/e1000e/es2lan.c (+78, -59)
  61. drivers/net/e1000e/ethtool.c (+157, -125)
  62. drivers/net/e1000e/hw.h (+89, -84)
  63. drivers/net/e1000e/ich8lan.c (+181, -128)
  64. drivers/net/e1000e/lib.c (+172, -176)
  65. drivers/net/e1000e/netdev.c (+403, -242)
  66. drivers/net/e1000e/param.c (+21, -12)
  67. drivers/net/e1000e/phy.c (+103, -61)
  68. drivers/net/ehea/ehea.h (+3, -3)
  69. drivers/net/ehea/ehea_main.c (+47, -45)
  70. drivers/net/fec_mpc52xx.c (+1, -1)
  71. drivers/net/fec_mpc52xx_phy.c (+1, -1)
  72. drivers/net/forcedeth.c (+78, -162)
  73. drivers/net/fs_enet/fs_enet-main.c (+2, -2)
  74. drivers/net/fs_enet/mii-bitbang.c (+2, -2)
  75. drivers/net/fs_enet/mii-fec.c (+2, -2)
  76. drivers/net/gianfar.c (+53, -27)
  77. drivers/net/gianfar.h (+16, -4)
  78. drivers/net/gianfar_mii.c (+1, -1)
  79. drivers/net/hamradio/bpqether.c (+2, -2)
  80. drivers/net/ibmveth.c (+3, -39)
  81. drivers/net/ixgb/ixgb.h (+39, -21)
  82. drivers/net/ixgb/ixgb_ee.c (+62, -62)
  83. drivers/net/ixgb/ixgb_ee.h (+6, -6)
  84. drivers/net/ixgb/ixgb_ethtool.c (+29, -38)
  85. drivers/net/ixgb/ixgb_hw.c (+99, -100)
  86. drivers/net/ixgb/ixgb_hw.h (+125, -125)
  87. drivers/net/ixgb/ixgb_main.c (+82, -74)
  88. drivers/net/ixgb/ixgb_osdep.h (+0, -7)
  89. drivers/net/ixgbe/ixgbe.h (+76, -11)
  90. drivers/net/ixgbe/ixgbe_ethtool.c (+28, -11)
  91. drivers/net/ixgbe/ixgbe_main.c (+1176, -344)
  92. drivers/net/korina.c (+1233, -0)
  93. drivers/net/loopback.c (+1, -1)
  94. drivers/net/macb.c (+1, -1)
  95. drivers/net/macvlan.c (+1, -1)
  96. drivers/net/mv643xx_eth.c (+231, -244)
  97. drivers/net/natsemi.c (+11, -9)
  98. drivers/net/netxen/netxen_nic.h (+0, -18)
  99. drivers/net/netxen/netxen_nic_isr.c (+2, -0)
  100. drivers/net/netxen/netxen_nic_main.c (+18, -1)

+ 2 - 1
Documentation/DocBook/Makefile

@@ -11,7 +11,8 @@ DOCBOOKS := wanbook.xml z8530book.xml mcabook.xml videobook.xml \
 	    procfs-guide.xml writing_usb_driver.xml networking.xml \
 	    kernel-api.xml filesystems.xml lsm.xml usb.xml kgdb.xml \
 	    gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
-	    genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml
+	    genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
+	    mac80211.xml
 
 ###
 # The build process is as follows (targets):

+ 335 - 0
Documentation/DocBook/mac80211.tmpl

@@ -0,0 +1,335 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+	"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="mac80211-developers-guide">
+  <bookinfo>
+    <title>The mac80211 subsystem for kernel developers</title>
+
+    <authorgroup>
+      <author>
+        <firstname>Johannes</firstname>
+        <surname>Berg</surname>
+        <affiliation>
+          <address><email>johannes@sipsolutions.net</email></address>
+        </affiliation>
+      </author>
+    </authorgroup>
+
+    <copyright>
+      <year>2007</year>
+      <year>2008</year>
+      <holder>Johannes Berg</holder>
+    </copyright>
+
+    <legalnotice>
+      <para>
+        This documentation is free software; you can redistribute
+        it and/or modify it under the terms of the GNU General Public
+        License version 2 as published by the Free Software Foundation.
+      </para>
+
+      <para>
+        This documentation is distributed in the hope that it will be
+        useful, but WITHOUT ANY WARRANTY; without even the implied
+        warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+        See the GNU General Public License for more details.
+      </para>
+
+      <para>
+        You should have received a copy of the GNU General Public
+        License along with this documentation; if not, write to the Free
+        Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+        MA 02111-1307 USA
+      </para>
+
+      <para>
+        For more details see the file COPYING in the source
+        distribution of Linux.
+      </para>
+    </legalnotice>
+
+    <abstract>
+!Pinclude/net/mac80211.h Introduction
+!Pinclude/net/mac80211.h Warning
+    </abstract>
+  </bookinfo>
+
+  <toc></toc>
+
+<!--
+Generally, this document shall be ordered by increasing complexity.
+It is important to note that readers should be able to read only
+the first few sections to get a working driver and only advanced
+usage should require reading the full document.
+-->
+
+  <part>
+    <title>The basic mac80211 driver interface</title>
+    <partintro>
+      <para>
+        You should read and understand the information contained
+        within this part of the book while implementing a driver.
+        In some chapters, advanced usage is noted, that may be
+        skipped at first.
+      </para>
+      <para>
+        This part of the book only covers station and monitor mode
+        functionality, additional information required to implement
+        the other modes is covered in the second part of the book.
+      </para>
+    </partintro>
+
+    <chapter id="basics">
+      <title>Basic hardware handling</title>
+      <para>TBD</para>
+      <para>
+        This chapter shall contain information on getting a hw
+        struct allocated and registered with mac80211.
+      </para>
+      <para>
+        Since it is required to allocate rates/modes before registering
+        a hw struct, this chapter shall also contain information on setting
+        up the rate/mode structs.
+      </para>
+      <para>
+        Additionally, some discussion about the callbacks and
+        the general programming model should be in here, including
+        the definition of ieee80211_ops which will be referred to
+        a lot.
+      </para>
+      <para>
+        Finally, a discussion of hardware capabilities should be done
+        with references to other parts of the book.
+      </para>
+<!-- intentionally multiple !F lines to get proper order -->
+!Finclude/net/mac80211.h ieee80211_hw
+!Finclude/net/mac80211.h ieee80211_hw_flags
+!Finclude/net/mac80211.h SET_IEEE80211_DEV
+!Finclude/net/mac80211.h SET_IEEE80211_PERM_ADDR
+!Finclude/net/mac80211.h ieee80211_ops
+!Finclude/net/mac80211.h ieee80211_alloc_hw
+!Finclude/net/mac80211.h ieee80211_register_hw
+!Finclude/net/mac80211.h ieee80211_get_tx_led_name
+!Finclude/net/mac80211.h ieee80211_get_rx_led_name
+!Finclude/net/mac80211.h ieee80211_get_assoc_led_name
+!Finclude/net/mac80211.h ieee80211_get_radio_led_name
+!Finclude/net/mac80211.h ieee80211_unregister_hw
+!Finclude/net/mac80211.h ieee80211_free_hw
+    </chapter>
+
+    <chapter id="phy-handling">
+      <title>PHY configuration</title>
+      <para>TBD</para>
+      <para>
+        This chapter should describe PHY handling including
+        start/stop callbacks and the various structures used.
+      </para>
+!Finclude/net/mac80211.h ieee80211_conf
+!Finclude/net/mac80211.h ieee80211_conf_flags
+    </chapter>
+
+    <chapter id="iface-handling">
+      <title>Virtual interfaces</title>
+      <para>TBD</para>
+      <para>
+        This chapter should describe virtual interface basics
+        that are relevant to the driver (VLANs, MGMT etc are not.)
+        It should explain the use of the add_iface/remove_iface
+        callbacks as well as the interface configuration callbacks.
+      </para>
+      <para>Things related to AP mode should be discussed there.</para>
+      <para>
+        Things related to supporting multiple interfaces should be
+        in the appropriate chapter, a BIG FAT note should be here about
+        this though and the recommendation to allow only a single
+        interface in STA mode at first!
+      </para>
+!Finclude/net/mac80211.h ieee80211_if_types
+!Finclude/net/mac80211.h ieee80211_if_init_conf
+!Finclude/net/mac80211.h ieee80211_if_conf
+    </chapter>
+
+    <chapter id="rx-tx">
+      <title>Receive and transmit processing</title>
+      <sect1>
+        <title>what should be here</title>
+        <para>TBD</para>
+        <para>
+          This should describe the receive and transmit
+          paths in mac80211/the drivers as well as
+          transmit status handling.
+        </para>
+      </sect1>
+      <sect1>
+        <title>Frame format</title>
+!Pinclude/net/mac80211.h Frame format
+      </sect1>
+      <sect1>
+        <title>Alignment issues</title>
+        <para>TBD</para>
+      </sect1>
+      <sect1>
+        <title>Calling into mac80211 from interrupts</title>
+!Pinclude/net/mac80211.h Calling mac80211 from interrupts
+      </sect1>
+      <sect1>
+        <title>functions/definitions</title>
+!Finclude/net/mac80211.h ieee80211_rx_status
+!Finclude/net/mac80211.h mac80211_rx_flags
+!Finclude/net/mac80211.h ieee80211_tx_control
+!Finclude/net/mac80211.h ieee80211_tx_status_flags
+!Finclude/net/mac80211.h ieee80211_rx
+!Finclude/net/mac80211.h ieee80211_rx_irqsafe
+!Finclude/net/mac80211.h ieee80211_tx_status
+!Finclude/net/mac80211.h ieee80211_tx_status_irqsafe
+!Finclude/net/mac80211.h ieee80211_rts_get
+!Finclude/net/mac80211.h ieee80211_rts_duration
+!Finclude/net/mac80211.h ieee80211_ctstoself_get
+!Finclude/net/mac80211.h ieee80211_ctstoself_duration
+!Finclude/net/mac80211.h ieee80211_generic_frame_duration
+!Finclude/net/mac80211.h ieee80211_get_hdrlen_from_skb
+!Finclude/net/mac80211.h ieee80211_get_hdrlen
+!Finclude/net/mac80211.h ieee80211_wake_queue
+!Finclude/net/mac80211.h ieee80211_stop_queue
+!Finclude/net/mac80211.h ieee80211_start_queues
+!Finclude/net/mac80211.h ieee80211_stop_queues
+!Finclude/net/mac80211.h ieee80211_wake_queues
+      </sect1>
+    </chapter>
+
+    <chapter id="filters">
+      <title>Frame filtering</title>
+!Pinclude/net/mac80211.h Frame filtering
+!Finclude/net/mac80211.h ieee80211_filter_flags
+    </chapter>
+  </part>
+
+  <part id="advanced">
+    <title>Advanced driver interface</title>
+    <partintro>
+      <para>
+       Information contained within this part of the book is
+       of interest only for advanced interaction of mac80211
+       with drivers to exploit more hardware capabilities and
+       improve performance.
+      </para>
+    </partintro>
+
+    <chapter id="hardware-crypto-offload">
+      <title>Hardware crypto acceleration</title>
+!Pinclude/net/mac80211.h Hardware crypto acceleration
+<!-- intentionally multiple !F lines to get proper order -->
+!Finclude/net/mac80211.h set_key_cmd
+!Finclude/net/mac80211.h ieee80211_key_conf
+!Finclude/net/mac80211.h ieee80211_key_alg
+!Finclude/net/mac80211.h ieee80211_key_flags
+    </chapter>
+
+    <chapter id="qos">
+      <title>Multiple queues and QoS support</title>
+      <para>TBD</para>
+!Finclude/net/mac80211.h ieee80211_tx_queue_params
+!Finclude/net/mac80211.h ieee80211_tx_queue_stats_data
+!Finclude/net/mac80211.h ieee80211_tx_queue
+    </chapter>
+
+    <chapter id="AP">
+      <title>Access point mode support</title>
+      <para>TBD</para>
+      <para>Some parts of the if_conf should be discussed here instead</para>
+      <para>
+        Insert notes about VLAN interfaces with hw crypto here or
+        in the hw crypto chapter.
+      </para>
+!Finclude/net/mac80211.h ieee80211_get_buffered_bc
+!Finclude/net/mac80211.h ieee80211_beacon_get
+    </chapter>
+
+    <chapter id="multi-iface">
+      <title>Supporting multiple virtual interfaces</title>
+      <para>TBD</para>
+      <para>
+        Note: WDS with identical MAC address should almost always be OK
+      </para>
+      <para>
+        Insert notes about having multiple virtual interfaces with
+        different MAC addresses here, note which configurations are
+        supported by mac80211, add notes about supporting hw crypto
+        with it.
+      </para>
+    </chapter>
+
+    <chapter id="hardware-scan-offload">
+      <title>Hardware scan offload</title>
+      <para>TBD</para>
+!Finclude/net/mac80211.h ieee80211_scan_completed
+    </chapter>
+  </part>
+
+  <part id="rate-control">
+    <title>Rate control interface</title>
+    <partintro>
+      <para>TBD</para>
+      <para>
+       This part of the book describes the rate control algorithm
+       interface and how it relates to mac80211 and drivers.
+      </para>
+    </partintro>
+    <chapter id="dummy">
+      <title>dummy chapter</title>
+      <para>TBD</para>
+    </chapter>
+  </part>
+
+  <part id="internal">
+    <title>Internals</title>
+    <partintro>
+      <para>TBD</para>
+      <para>
+       This part of the book describes mac80211 internals.
+      </para>
+    </partintro>
+
+    <chapter id="key-handling">
+      <title>Key handling</title>
+      <sect1>
+        <title>Key handling basics</title>
+!Pnet/mac80211/key.c Key handling basics
+      </sect1>
+      <sect1>
+        <title>MORE TBD</title>
+        <para>TBD</para>
+      </sect1>
+    </chapter>
+
+    <chapter id="rx-processing">
+      <title>Receive processing</title>
+      <para>TBD</para>
+    </chapter>
+
+    <chapter id="tx-processing">
+      <title>Transmit processing</title>
+      <para>TBD</para>
+    </chapter>
+
+    <chapter id="sta-info">
+      <title>Station info handling</title>
+      <sect1>
+        <title>Programming information</title>
+!Fnet/mac80211/sta_info.h sta_info
+!Fnet/mac80211/sta_info.h ieee80211_sta_info_flags
+      </sect1>
+      <sect1>
+        <title>STA information lifetime rules</title>
+!Pnet/mac80211/sta_info.c STA information lifetime rules
+      </sect1>
+    </chapter>
+
+    <chapter id="synchronisation">
+      <title>Synchronisation</title>
+      <para>TBD</para>
+      <para>Locking, lots of RCU</para>
+    </chapter>
+  </part>
+</book>
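
The new template pulls all of its reference material from include/net/mac80211.h through the !P/!F kernel-doc directives. As a hedged sketch of the alloc/register flow that the "Basic hardware handling" chapter describes (hypothetical foo_* names; the ieee80211_ops callback prototypes change between kernel versions, so only the registration calls documented above are spelled out):

#include <linux/module.h>
#include <net/mac80211.h>

/* Driver-private state, allocated together with the ieee80211_hw. */
struct foo_priv {
	void __iomem *regs;
};

/*
 * A real driver fills in .tx, .start, .stop, .config, .add_interface,
 * .remove_interface and so on; their prototypes differ between kernel
 * versions, so they are left out of this sketch.
 */
static const struct ieee80211_ops foo_ops = {
	/* .tx = foo_tx, .start = foo_start, ... */
};

static struct ieee80211_hw *foo_register(struct device *dev, u8 *perm_addr)
{
	struct ieee80211_hw *hw;
	int err;

	/* Allocate the ieee80211_hw plus our private area. */
	hw = ieee80211_alloc_hw(sizeof(struct foo_priv), &foo_ops);
	if (!hw)
		return NULL;

	SET_IEEE80211_DEV(hw, dev);             /* parent device for sysfs */
	SET_IEEE80211_PERM_ADDR(hw, perm_addr); /* MAC address from EEPROM */

	/*
	 * Bands, rates and hardware capability flags must be configured
	 * here, before registration, as the chapter above points out.
	 */

	err = ieee80211_register_hw(hw);
	if (err) {
		ieee80211_free_hw(hw);
		return NULL;
	}
	return hw;
}

static void foo_unregister(struct ieee80211_hw *hw)
{
	ieee80211_unregister_hw(hw);
	ieee80211_free_hw(hw);
}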

+ 0 - 37
Documentation/feature-removal-schedule.txt

@@ -203,14 +203,6 @@ Who:  linuxppc-dev@ozlabs.org
 
 ---------------------------
 
-What:   sk98lin network driver
-When:   Feburary 2008
-Why:    In kernel tree version of driver is unmaintained. Sk98lin driver
-	replaced by the skge driver. 
-Who:    Stephen Hemminger <shemminger@linux-foundation.org>
-
----------------------------
-
 What:	i386/x86_64 bzImage symlinks
 When:	April 2010
 
@@ -221,8 +213,6 @@ Who:	Thomas Gleixner <tglx@linutronix.de>
 
 ---------------------------
 
----------------------------
-
 What:	i2c-i810, i2c-prosavage and i2c-savage4
 When:	May 2008
 Why:	These drivers are superseded by i810fb, intelfb and savagefb.
@@ -230,33 +220,6 @@ Who:	Jean Delvare <khali@linux-fr.org>
 
 ---------------------------
 
-What:	bcm43xx wireless network driver
-When:	2.6.26
-Files:	drivers/net/wireless/bcm43xx
-Why:	This driver's functionality has been replaced by the
-	mac80211-based b43 and b43legacy drivers.
-Who:	John W. Linville <linville@tuxdriver.com>
-
----------------------------
-
-What:	ieee80211 softmac wireless networking component
-When:	2.6.26 (or after removal of bcm43xx and port of zd1211rw to mac80211)
-Files:	net/ieee80211/softmac
-Why:	No in-kernel drivers will depend on it any longer.
-Who:	John W. Linville <linville@tuxdriver.com>
-
----------------------------
-
-What:	rc80211-simple rate control algorithm for mac80211
-When:	2.6.26
-Files:	net/mac80211/rc80211-simple.c
-Why:	This algorithm was provided for reference but always exhibited bad
-	responsiveness and performance and has some serious flaws. It has been
-	replaced by rc80211-pid.
-Who:	Stefano Brivio <stefano.brivio@polimi.it>
-
----------------------------
-
 What (Why):
 	- include/linux/netfilter_ipv4/ipt_TOS.h ipt_tos.h header files
 	  (superseded by xt_TOS/xt_tos target & match)

+ 1 - 1
Documentation/laptops/acer-wmi.txt

@@ -80,7 +80,7 @@ once you enable the radio, will depend on your hardware and driver combination.
 e.g. With the BCM4318 on the Acer Aspire 5020 series:
 
 ndiswrapper: Light blinks on when transmitting
-bcm43xx/b43: Solid light, blinks off when transmitting
+b43: Solid light, blinks off when transmitting
 
 Wireless radio control is unconditionally enabled - all Acer laptops that support
 acer-wmi come with built-in wireless. However, should you feel so inclined to

+ 0 - 2
Documentation/networking/00-INDEX

@@ -100,8 +100,6 @@ tuntap.txt
 	- TUN/TAP device driver, allowing user space Rx/Tx of packets.
 vortex.txt
 	- info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards.
-wan-router.txt
-	- WAN router documentation
 wavelan.txt
 	- AT&T GIS (nee NCR) WaveLAN card: An Ethernet-like radio transceiver
 x25.txt

+ 0 - 89
Documentation/networking/bcm43xx.txt

@@ -1,89 +0,0 @@
-
-			BCM43xx Linux Driver Project
-			============================
-
-Introduction
-------------
-
-Many of the wireless devices found in modern notebook computers are
-based on the wireless chips produced by Broadcom. These devices have
-been a problem for Linux users as there is no open-source driver
-available. In addition, Broadcom has not released specifications
-for the device, and driver availability has been limited to the
-binary-only form used in the GPL versions of AP hardware such as the
-Linksys WRT54G, and the Windows and OS X drivers.  Before this project
-began, the only way to use these devices were to use the Windows or
-OS X drivers with either the Linuxant or ndiswrapper modules. There
-is a strong penalty if this method is used as loading the binary-only
-module "taints" the kernel, and no kernel developer will help diagnose
-any kernel problems.
-
-Development
------------
-
-This driver has been developed using
-a clean-room technique that is described at
-http://bcm-specs.sipsolutions.net/ReverseEngineeringProcess. For legal
-reasons, none of the clean-room crew works on the on the Linux driver,
-and none of the Linux developers sees anything but the specifications,
-which are the ultimate product of the reverse-engineering group.
-
-Software
---------
-
-Since the release of the 2.6.17 kernel, the bcm43xx driver has been
-distributed with the kernel source, and is prebuilt in most, if not
-all, distributions.  There is, however, additional software that is
-required. The firmware used by the chip is the intellectual property
-of Broadcom and they have not given the bcm43xx team redistribution
-rights to this firmware.  Since we cannot legally redistribute
-the firmware we cannot include it with the driver. Furthermore, it
-cannot be placed in the downloadable archives of any distributing
-organization; therefore, the user is responsible for obtaining the
-firmware and placing it in the appropriate location so that the driver
-can find it when initializing.
-
-To help with this process, the bcm43xx developers provide a separate
-program named bcm43xx-fwcutter to "cut" the firmware out of a
-Windows or OS X driver and write the extracted files to the proper
-location. This program is usually provided with the distribution;
-however, it may be downloaded from
-
-http://developer.berlios.de/project/showfiles.php?group_id=4547
-
-The firmware is available in two versions. V3 firmware is used with
-the in-kernel bcm43xx driver that uses a software MAC layer called
-SoftMAC, and will have a microcode revision of 0x127 or smaller. The
-V4 firmware is used by an out-of-kernel driver employing a variation of
-the Devicescape MAC layer known as d80211. Once bcm43xx-d80211 reaches
-a satisfactory level of development, it will replace bcm43xx-softmac
-in the kernel as it is much more flexible and powerful.
-
-A source for the latest V3 firmware is
-
-http://downloads.openwrt.org/sources/wl_apsta-3.130.20.0.o
-
-Once this file is downloaded, the command
-'bcm43xx-fwcutter -w <dir> <filename>'
-will extract the microcode and write it to directory
-<dir>. The correct directory will depend on your distribution;
-however, most use '/lib/firmware'. Once this step is completed,
-the bcm3xx driver should load when the system is booted. To see
-any messages relating to the driver, issue the command 'dmesg |
-grep bcm43xx' from a terminal window. If there are any problems,
-please send that output to Bcm43xx-dev@lists.berlios.de.
-
-Although the driver has been in-kernel since 2.6.17, the earliest
-version is quite limited in its capability. Patches that include
-all features of later versions are available for the stable kernel
-versions from 2.6.18. These will be needed if you use a BCM4318,
-or a PCI Express version (BCM4311 and BCM4312). In addition, if you
-have an early BCM4306 and more than 1 GB RAM, your kernel will need
-to be patched.	These patches, which are being updated regularly,
-are available at ftp://lwfinger.dynalias.org/patches. Look for
-combined_2.6.YY.patch. Of course you will need kernel source downloaded
-from kernel.org, or the source from your distribution.
-
-If you build your own kernel, please enable CONFIG_BCM43XX_DEBUG
-and CONFIG_IEEE80211_SOFTMAC_DEBUG. The log information provided is
-essential for solving any problems.

+ 0 - 621
Documentation/networking/wan-router.txt

@@ -1,621 +0,0 @@
-------------------------------------------------------------------------------
-Linux WAN Router Utilities Package
-------------------------------------------------------------------------------
-Version 2.2.1 
-Mar 28, 2001
-Author: Nenad Corbic <ncorbic@sangoma.com>
-Copyright (c) 1995-2001 Sangoma Technologies Inc.
-------------------------------------------------------------------------------
-
-INTRODUCTION
-
-Wide Area Networks (WANs) are used to interconnect Local Area Networks (LANs)
-and/or stand-alone hosts over vast distances with data transfer rates
-significantly higher than those achievable with commonly used dial-up
-connections.
-
-Usually an external device called `WAN router' sitting on your local network
-or connected to your machine's serial port provides physical connection to
-WAN.  Although router's job may be as simple as taking your local network
-traffic, converting it to WAN format and piping it through the WAN link, these
-devices are notoriously expensive, with prices as much as 2 - 5 times higher
-then the price of a typical PC box.
-
-Alternatively, considering robustness and multitasking capabilities of Linux,
-an internal router can be built (most routers use some sort of stripped down
-Unix-like operating system anyway). With a number of relatively inexpensive WAN
-interface cards available on the market, a perfectly usable router can be
-built for less than half a price of an external router.  Yet a Linux box
-acting as a router can still be used for other purposes, such as fire-walling,
-running FTP, WWW or DNS server, etc.
-
-This kernel module introduces the notion of a WAN Link Driver (WLD) to Linux
-operating system and provides generic hardware-independent services for such
-drivers.  Why can existing Linux network device interface not be used for
-this purpose?  Well, it can.  However, there are a few key differences between
-a typical network interface (e.g. Ethernet) and a WAN link.
-
-Many WAN protocols, such as X.25 and frame relay, allow for multiple logical
-connections (known as `virtual circuits' in X.25 terminology) over a single
-physical link.  Each such virtual circuit may (and almost always does) lead
-to a different geographical location and, therefore, different network.  As a
-result, it is the virtual circuit, not the physical link, that represents a
-route and, therefore, a network interface in Linux terms.
-
-To further complicate things, virtual circuits are usually volatile in nature
-(excluding so called `permanent' virtual circuits or PVCs).  With almost no
-time required to set up and tear down a virtual circuit, it is highly desirable
-to implement on-demand connections in order to minimize network charges.  So
-unlike a typical network driver, the WAN driver must be able to handle multiple
-network interfaces and cope as multiple virtual circuits come into existence
-and go away dynamically.
- 
-Last, but not least, WAN configuration is much more complex than that of say
-Ethernet and may well amount to several dozens of parameters.  Some of them
-are "link-wide"  while others are virtual circuit-specific.  The same holds
-true for WAN statistics which is by far more extensive and extremely useful
-when troubleshooting WAN connections.  Extending the ifconfig utility to suit
-these needs may be possible, but does not seem quite reasonable.  Therefore, a
-WAN configuration utility and corresponding application programmer's interface
-is needed for this purpose.
-
-Most of these problems are taken care of by this module.  Its goal is to
-provide a user with more-or-less standard look and feel for all WAN devices and
-assist a WAN device driver writer by providing common services, such as:
-
- o User-level interface via /proc file system
- o Centralized configuration
- o Device management (setup, shutdown, etc.)
- o Network interface management (dynamic creation/destruction)
- o Protocol encapsulation/decapsulation
-
-To ba able to use the Linux WAN Router you will also need a WAN Tools package
-available from
-
-	ftp.sangoma.com/pub/linux/current_wanpipe/wanpipe-X.Y.Z.tgz
-
-where vX.Y.Z represent the wanpipe version number.
-
-For technical questions and/or comments please e-mail to ncorbic@sangoma.com.
-For general inquiries please contact Sangoma Technologies Inc. by
-
-	Hotline:	1-800-388-2475	(USA and Canada, toll free)
-	Phone:		(905) 474-1990  ext: 106
-	Fax:		(905) 474-9223
-	E-mail:		dm@sangoma.com	(David Mandelstam)
-	WWW:		http://www.sangoma.com
-
-
-INSTALLATION
-
-Please read the WanpipeForLinux.pdf manual on how to 
-install the WANPIPE tools and drivers properly. 
-
-
-After installing wanpipe package: /usr/local/wanrouter/doc. 
-On the ftp.sangoma.com : /linux/current_wanpipe/doc
-
-
-COPYRIGHT AND LICENSING INFORMATION
-
-This program is free software; you can redistribute it and/or modify it under
-the terms of the GNU General Public License as published by the Free Software
-Foundation; either version 2, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License along with
-this program; if not, write to the Free Software Foundation, Inc., 675 Mass
-Ave, Cambridge, MA 02139, USA.
-
-
-
-ACKNOWLEDGEMENTS
-
-This product is based on the WANPIPE(tm) Multiprotocol WAN Router developed
-by Sangoma Technologies Inc. for Linux 2.0.x and 2.2.x.  Success of the WANPIPE
-together with the next major release of Linux kernel in summer 1996 commanded
-adequate changes to the WANPIPE code to take full advantage of new Linux
-features.
-
-Instead of continuing developing proprietary interface tied to Sangoma WAN
-cards, we decided to separate all hardware-independent code into a separate
-module and defined two levels of interfaces - one for user-level applications
-and another for kernel-level WAN drivers.  WANPIPE is now implemented as a
-WAN driver compliant with the WAN Link Driver interface.  Also a general
-purpose WAN configuration utility and a set of shell scripts was developed to 
-support WAN router at the user level.
-
-Many useful ideas concerning hardware-independent interface implementation
-were given by Mike McLagan <mike.mclagan@linux.org> and his implementation
-of the Frame Relay router and drivers for Sangoma cards (dlci/sdla).
-
-With the new implementation of the APIs being incorporated into the WANPIPE,
-a special thank goes to Alan Cox in providing insight into BSD sockets.
-
-Special thanks to all the WANPIPE users who performed field-testing, reported
-bugs and made valuable comments and suggestions that help us to improve this
-product.
-
-
-
-NEW IN THIS RELEASE
-
-	o Updated the WANCFG utility
-		Calls the pppconfig to configure the PPPD
-		for async connections.
-
-	o Added the PPPCONFIG utility
-		Used to configure the PPPD daemon for the
-		WANPIPE Async PPP and standard serial port.
-		The wancfg calls the pppconfig to configure
-		the pppd.
-
-	o Fixed the PCI autodetect feature.  
-		The SLOT 0 was used as an autodetect option
-		however, some high end PC's slot numbers start
-		from 0. 
-
-	o This release has been tested with the new backupd
-	  daemon release.
-	
-
-PRODUCT COMPONENTS AND RELATED FILES
-
-/etc: (or user defined)
-	wanpipe1.conf	default router configuration file
-
-/lib/modules/X.Y.Z/misc:
-	wanrouter.o	router kernel loadable module
-	af_wanpipe.o	wanpipe api socket module
-
-/lib/modules/X.Y.Z/net:
-	sdladrv.o	Sangoma SDLA support module
-	wanpipe.o	Sangoma WANPIPE(tm) driver module
-
-/proc/net/wanrouter
-	Config		reads current router configuration
-	Status		reads current router status
-	{name}		reads WAN driver statistics
-
-/usr/sbin:
-	wanrouter	wanrouter start-up script
-	wanconfig	wanrouter configuration utility
-	sdladump	WANPIPE adapter memory dump utility
-        fpipemon        Monitor for Frame Relay
-        cpipemon        Monitor for Cisco HDLC
-	ppipemon 	Monitor for PPP
-	xpipemon 	Monitor for X25
-	wpkbdmon        WANPIPE keyboard led monitor/debugger
-
-/usr/local/wanrouter:
-	README		this file
-	COPYING		GNU General Public License
-	Setup		installation script
-	Filelist	distribution definition file
-	wanrouter.rc	meta-configuration file 
-			(used by the Setup and wanrouter script)
-
-/usr/local/wanrouter/doc:
-	wanpipeForLinux.pdf 	WAN Router User's Manual
-
-/usr/local/wanrouter/patches:
-	wanrouter-v2213.gz  	patch for Linux kernels 2.2.11 up to 2.2.13.
-	wanrouter-v2214.gz	patch for Linux kernel 2.2.14. 
-	wanrouter-v2215.gz	patch for Linux kernels 2.2.15 to 2.2.17.
-	wanrouter-v2218.gz	patch for Linux kernels 2.2.18 and up.
-	wanrouter-v240.gz	patch for Linux kernel 2.4.0.  
-	wanrouter-v242.gz	patch for Linux kernel 2.4.2 and up.
-	wanrouter-v2034.gz	patch for Linux kernel 2.0.34
-	wanrouter-v2036.gz 	patch for Linux kernel 2.0.36 and up. 
-
-/usr/local/wanrouter/patches/kdrivers:
-	Sources of the latest WANPIPE device drivers.
-	These are used to UPGRADE the linux kernel to the newest
-	version if the kernel source has already been patched with
-	WANPIPE drivers.
-
-/usr/local/wanrouter/samples:
-	interface	sample interface configuration file
-	wanpipe1.cpri 	CHDLC primary port
-     	wanpipe2.csec 	CHDLC secondary port
-     	wanpipe1.fr   	Frame Relay protocol
-     	wanpipe1.ppp  	PPP protocol ) 
-	wanpipe1.asy	CHDLC ASYNC protocol
-	wanpipe1.x25	X25 protocol
-	wanpipe1.stty	Sync TTY driver (Used by Kernel PPPD daemon)
-	wanpipe1.atty	Async TTY driver (Used by Kernel PPPD daemon)
-	wanrouter.rc	sample meta-configuration file
-
-/usr/local/wanrouter/util:
-	*		wan-tools utilities source code
-
-/usr/local/wanrouter/api/x25:
-	*		x25 api sample programs.
-/usr/local/wanrouter/api/chdlc:
-	*		chdlc api sample programs.
-/usr/local/wanrouter/api/fr:
-	*		fr api sample programs.
-/usr/local/wanrouter/config/wancfg:
-	wancfg		WANPIPE GUI configuration program.
-                        Creates wanpipe#.conf files. 
-/usr/local/wanrouter/config/cfgft1:
-	cfgft1		GUI CSU/DSU configuration program.
-
-/usr/include/linux:
-	wanrouter.h	router API definitions
-	wanpipe.h	WANPIPE API definitions
-	sdladrv.h	SDLA support module API definitions
-	sdlasfm.h	SDLA firmware module definitions
-	if_wanpipe.h	WANPIPE Socket definitions
-	sdlapci.h	WANPIPE PCI definitions
-	
-
-/usr/src/linux/net/wanrouter:
-	*		wanrouter source code
-
-/var/log:
-	wanrouter	wanrouter start-up log (created by the Setup script)
-
-/var/lock:  (or /var/lock/subsys for RedHat)
-	wanrouter	wanrouter lock file (created by the Setup script)
-
-/usr/local/wanrouter/firmware:
-	fr514.sfm	Frame relay firmware for Sangoma S508/S514 card
-	cdual514.sfm	Dual Port Cisco HDLC firmware for Sangoma S508/S514 card
-	ppp514.sfm      PPP Firmware for Sangoma S508 and S514 cards
-	x25_508.sfm	X25 Firmware for Sangoma S508 card.
-
-
-REVISION HISTORY
-
-1.0.0	December 31, 1996	Initial version
-
-1.0.1	January 30, 1997	Status and statistics can be read via /proc
-				filesystem entries.
-
-1.0.2   April 30, 1997          Added UDP management via monitors.
-
-1.0.3	June 3, 1997		UDP management for multiple boards using Frame
-				Relay and PPP
-				Enabled continuous transmission of Configure 
-				Request Packet for PPP (for 508 only)
-				Connection Timeout for PPP changed from 900 to 0
-				Flow Control Problem fixed for Frame Relay
-
-1.0.4	July 10, 1997		S508/FT1 monitoring capability in fpipemon and
-				ppipemon utilities.
-				Configurable TTL for UDP packets.
-				Multicast and Broadcast IP source addresses are
-				silently discarded.
-
-1.0.5	July 28, 1997		Configurable T391,T392,N391,N392,N393 for Frame
-				Relay in router.conf.
-				Configurable Memory Address through router.conf 
-				for Frame Relay, PPP and X.25. (commenting this
- 				out enables auto-detection).
-				Fixed freeing up received buffers using kfree()
- 				for Frame Relay and X.25.
-				Protect sdla_peek() by calling save_flags(),
-				cli() and restore_flags().
-				Changed number of Trace elements from 32 to 20
-				Added DLCI specific data monitoring in FPIPEMON. 
-2.0.0	Nov 07, 1997		Implemented protection of RACE conditions by 
-				critical flags for FRAME RELAY and PPP.
-				DLCI List interrupt mode implemented.
-				IPX support in FRAME RELAY and PPP.
-				IPX Server Support (MARS)
-				More driver specific stats included in FPIPEMON
-				and PIPEMON.
-
-2.0.1	Nov 28, 1997		Bug Fixes for version 2.0.0.
-				Protection of "enable_irq()" while 
-				"disable_irq()" has been enabled from any other
-				routine (for Frame Relay, PPP and X25).
-				Added additional Stats for Fpipemon and Ppipemon
-				Improved Load Sharing for multiple boards
-
-2.0.2	Dec 09, 1997		Support for PAP and CHAP for ppp has been
-				implemented.
-
-2.0.3	Aug 15, 1998		New release supporting Cisco HDLC, CIR for Frame
-				relay, Dynamic IP assignment for PPP and Inverse
-				Arp support for Frame-relay.  Man Pages are 
-				included for better support and a new utility
-				for configuring FT1 cards.
-
-2.0.4	Dec 09, 1998	        Dual Port support for Cisco HDLC.
-				Support for HDLC (LAPB) API.
-				Supports BiSync Streaming code for S502E 
-				and S503 cards.
-				Support for Streaming HDLC API.
-				Provides a BSD socket interface for 
-				creating applications using BiSync
-   				streaming.        
-
-2.0.5   Aug 04, 1999 		CHDLC initialization bug fix.
-				PPP interrupt driven driver: 
-  				Fix to the PPP line hangup problem.
-				New PPP firmware
-				Added comments to the startup SYSTEM ERROR messages
-				Xpipemon debugging application for the X25 protocol
-				New USER_MANUAL.txt
-				Fixed the odd boundary 4byte writes to the board.
-				BiSync Streaming code has been taken out.  
-				 Available as a patch.
-				Streaming HDLC API has been taken out.  
-				 Available as a patch.                 
-
-2.0.6   Aug 17, 1999		Increased debugging in statup scripts
-				Fixed installation bugs from 2.0.5
-				Kernel patch works for both 2.2.10 and 2.2.11 kernels.
-				There is no functional difference between the two packages         
-
-2.0.7   Aug 26, 1999		o  Merged X25API code into WANPIPE.
-				o  Fixed a memory leak for X25API
-				o  Updated the X25API code for 2.2.X kernels.
-				o  Improved NEM handling.   
-
-2.1.0	Oct 25, 1999		o New code for S514 PCI Card
-				o New CHDLC and Frame Relay drivers
-				o PPP and X25 are not supported in this release    
-
-2.1.1	Nov 30, 1999		o PPP support for S514 PCI Cards
-
-2.1.3   Apr 06, 2000		o Socket based x25api 
-				o Socket based chdlc api
-				o Socket based fr api
-				o Dual Port Receive only CHDLC support.
-				o Asynchronous CHDLC support (Secondary Port)
-				o cfgft1 GUI csu/dsu configurator
-				o wancfg GUI configuration file 
-				  configurator.
-				o Architectural directory changes.
-
-beta-2.1.4 Jul 2000		o Dynamic interface configuration:
-					Network interfaces reflect the state
-					of protocol layer.  If the protocol becomes
-					disconnected, driver will bring down
-					the interface.  Once the protocol reconnects
-					the interface will be brought up. 
-					
-					Note: This option is turned off by default.
-
-				o Dynamic wanrouter setup using 'wanconfig':
-					wanconfig utility can be used to
-					shutdown,restart,start or reconfigure 
-					a virtual circuit dynamically.
-				     
-					Frame Relay:  Each DLCI can be: 
-						      created,stopped,restarted and reconfigured
-						      dynamically using wanconfig.
-					
-						      ex: wanconfig card wanpipe1 dev wp1_fr16 up
-				  
-				o Wanrouter startup via command line arguments:
-					wanconfig also supports wanrouter startup via command line
-					arguments.  Thus, there is no need to create a wanpipe#.conf
-					configuration file.  
-
-				o Socket based x25api update/bug fixes.
-					Added support for LCN numbers greater than 255.
-					Option to pass up modem messages.
-					Provided a PCI IRQ check, so a single S514
-					card is guaranteed to have a non-sharing interrupt.
-
-				o Fixes to the wancfg utility.
-				o New FT1 debugging support via *pipemon utilities.
-				o Frame Relay ARP support Enabled.
-
-beta3-2.1.4 Jul 2000		o X25 M_BIT Problem fix.
-				o Added the Multi-Port PPP
-				  Updated utilities for the Multi-Port PPP.
-
-2.1.4	Aut 2000
-				o In X25API:
-					Maximum packet an application can send
-					to the driver has been extended to 4096 bytes.
-
-					Fixed the x25 startup bug. Enable 
-					communications only after all interfaces
-					come up.  HIGH SVC/PVC is used to calculate
-					the number of channels.
-					Enable protocol only after all interfaces
-					are enabled.
-
-				o Added an extra state to the FT1 config, kernel module.
-				o Updated the pipemon debuggers.
-
-				o Blocked the Multi-Port PPP from running on kernels
-				  2.2.16 or greater, due to syncppp kernel module
-				  change. 
-	  
-beta1-2.1.5 	Nov 15 2000
-				o Fixed the MultiPort PPP Support for kernels 2.2.16 and above.
-				  2.2.X kernels only
-
-				o Secured the driver UDP debugging calls
-					- All illegal network debugging calls are reported to
-					  the log.
-					- Defined a set of allowed commands, all other denied.
-					
-				o Cpipemon
-					- Added set FT1 commands to the cpipemon. Thus CSU/DSU
-					  configuration can be performed using cpipemon.
-					  All systems that cannot run cfgft1 GUI utility should
-					  use cpipemon to configure the on board CSU/DSU.
-
-
-				o Keyboard Led Monitor/Debugger
-					- A new utility /usr/sbin/wpkbdmon uses keyboard leds
-					  to convey operational statistic information of the 
-					  Sangoma WANPIPE cards.
-					NUM_LOCK    = Line State  (On=connected,    Off=disconnected)
-					CAPS_LOCK   = Tx data     (On=transmitting, Off=no tx data)
-					SCROLL_LOCK = Rx data     (On=receiving,    Off=no rx data
-					
-				o Hardware probe on module load and dynamic device allocation
-					- During WANPIPE module load, all Sangoma cards are probed
-					  and found information is printed in the /var/log/messages.
-					- If no cards are found, the module load fails.
-					- Appropriate number of devices are dynamically loaded 
-					  based on the number of Sangoma cards found.
-
-					  Note: The kernel configuration option 
-						CONFIG_WANPIPE_CARDS has been taken out.
-					
-				o Fixed the Frame Relay and Chdlc network interfaces so they are
-				  compatible with libpcap libraries.  Meaning, tcpdump, snort,
-				  ethereal, and all other packet sniffers and debuggers work on
-				  all WANPIPE network interfaces.
-					- Set the network interface encoding type to ARPHRD_PPP.
-					  This tell the sniffers that data obtained from the
-					  network interface is in pure IP format.
-				  Fix for 2.2.X kernels only.
-				
-				o True interface encoding option for Frame Relay and CHDLC
-					- The above fix sets the network interface encoding
-					  type to ARPHRD_PPP, however some customers use
-					  the encoding interface type to determine the
-					  protocol running.  Therefore, the TURE ENCODING
-					  option will set the interface type back to the
-					  original value.  
-
-					  NOTE: If this option is used with Frame Relay and CHDLC
-						libpcap library support will be broken.  
-						i.e. tcpdump will not work.
-					Fix for 2.2.x Kernels only.
-						
-				o Ethernet Bridgind over Frame Relay
-					- The Frame Relay bridging has been developed by 
-					  Kristian Hoffmann and Mark Wells.  
-					- The Linux kernel bridge is used to send ethernet 
-					  data over the frame relay links.
-					For 2.2.X Kernels only.
-
-				o Added extensive 2.0.X support. Most new features of
-				  2.1.5 for protocols Frame Relay, PPP and CHDLC are
-				  supported under 2.0.X kernels. 
-
-beta1-2.2.0 	Dec 30 2000
-				o Updated drivers for 2.4.X kernels.
-				o Updated drivers for SMP support.
-				o X25API is now able to share PCI interrupts.
-				o Took out a general polling routine that was used
-				  only by X25API. 
-				o Added appropriate locks to the dynamic reconfiguration
-				  code.
-				o Fixed a bug in the keyboard debug monitor.
-
-beta2-2.2.0	Jan 8 2001
-				o Patches for 2.4.0 kernel
-				o Patches for 2.2.18 kernel
-				o Minor updates to PPP and CHLDC drivers.
-				  Note: No functional difference.
-
-beta3-2.2.9	Jan 10 2001
-				o I missed the 2.2.18 kernel patches in beta2-2.2.0
-				  release.  They are included in this release.
-
-Stable Release
-2.2.0		Feb 01 2001
-				o Bug fix in wancfg GUI configurator.
-					The edit function didn't work properly.
-
-
-bata1-2.2.1	Feb 09 2001
-			o WANPIPE TTY Driver emulation. 
-			  Two modes of operation Sync and Async.
-				Sync: Using the PPPD daemon, kernel SyncPPP layer
-				      and the Wanpipe sync TTY driver: a PPP protocol 
-				      connection can be established via Sangoma adapter, over
-				      a T1 leased line.
-			
-				      The 2.4.0 kernel PPP layer supports MULTILINK
-				      protocol, that can be used to bundle any number of Sangoma
-				      adapters (T1 lines) into one, under a single IP address.
-				      Thus, efficiently obtaining multiple T1 throughput. 
-
-				      NOTE: The remote side must also implement MULTILINK PPP
-					    protocol.
-
-				Async:Using the PPPD daemon, kernel AsyncPPP layer
-				      and the WANPIPE async TTY driver: a PPP protocol
-				      connection can be established via Sangoma adapter and
-				      a modem, over a telephone line.
-
-				      Thus, the WANPIPE async TTY driver simulates a serial
-				      TTY driver that would normally be used to interface the 
-				      MODEM to the linux kernel.
-				
-			o WANPIPE PPP Backup Utility
-				This utility will monitor the state of the PPP T1 line.
-				In case of failure, a dial up connection will be established
-				via pppd daemon, ether via a serial tty driver (serial port), 
-				or a WANPIPE async TTY driver (in case serial port is unavailable).
-				
-				Furthermore, while in dial up mode, the primary PPP T1 link
-				will be monitored for signs of life.  
-
-				If the PPP T1 link comes back to life, the dial up connection
-				will be shutdown and T1 line re-established.
-			
-
-			o New Setup installation script.
-				Option to UPGRADE device drivers if the kernel source has
-				already been patched with WANPIPE.
-
-				Option to COMPILE WANPIPE modules against the currently 
-				running kernel, thus no need for manual kernel and module
-				re-compilation.
-			
-			o Updates and Bug Fixes to wancfg utility.
-
-bata2-2.2.1	Feb 20 2001
-
-			o Bug fixes to the CHDLC device drivers.
-				The driver had compilation problems under kernels
-				2.2.14 or lower.
-
-			o Bug fixes to the Setup installation script.
-				The device drivers compilation options didn't work
-				properly.
-
-			o Update to the wpbackupd daemon.  
-				Optimized the cross-over times, between the primary
-				link and the backup dialup.
-
-beta3-2.2.1	Mar 02 2001
-			o Patches for 2.4.2 kernel.
-
-			o Bug fixes to util/ make files.
-			o Bug fixes to the Setup installation script.
-
-			o Took out the backupd support and made it into
-			  as separate package.
-			  
-beta4-2.2.1     Mar 12 2001
-
-		o Fix to the Frame Relay Device driver.
-			IPSAC sends a packet of zero length
-			header to the frame relay driver.  The
-			driver tries to push its own 2 byte header
-			into the packet, which causes the driver to
-			crash.
-
-		o Fix the WANPIPE re-configuration code.
-			Bug was found by trying to run  the cfgft1 while the
-			interface was already running.  
-
-		o Updates to cfgft1.
-			Writes a wanpipe#.cfgft1 configuration file
-			once the CSU/DSU is configured. This file can
-			holds the current CSU/DSU configuration.
-
-
-
->>>>>> END OF README <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
-
-

+ 1 - 16
MAINTAINERS

@@ -840,15 +840,6 @@ L:	linux-wireless@vger.kernel.org
 W:	http://linuxwireless.org/en/users/Drivers/b43
 S:	Maintained
 
-BCM43XX WIRELESS DRIVER (SOFTMAC BASED VERSION)
-P:	Larry Finger
-M:	Larry.Finger@lwfinger.net
-P:	Stefano Brivio
-M:	stefano.brivio@polimi.it
-L:	linux-wireless@vger.kernel.org
-W:	http://bcm43xx.berlios.de/
-S:	Obsolete
-
 BEFS FILE SYSTEM
 P:	Sergey S. Kostyliov
 M:	rathamahata@php4.ru
@@ -3479,7 +3470,7 @@ P:	Vlad Yasevich
 M:	vladislav.yasevich@hp.com
 P:	Sridhar Samudrala
 M:	sri@us.ibm.com
-L:	lksctp-developers@lists.sourceforge.net
+L:	linux-sctp@vger.kernel.org
 W:	http://lksctp.sourceforge.net
 S:	Supported
 
@@ -3613,12 +3604,6 @@ M:	mhoffman@lightlink.com
 L:	lm-sensors@lm-sensors.org
 S:	Maintained
 
-SOFTMAC LAYER (IEEE 802.11)
-P:	Daniel Drake
-M:	dsd@gentoo.org
-L:	linux-wireless@vger.kernel.org
-S:	Obsolete
-
 SOFTWARE RAID (Multiple Disks) SUPPORT
 P:	Ingo Molnar
 M:	mingo@redhat.com

+ 1 - 1
arch/ia64/hp/sim/simeth.c

@@ -294,7 +294,7 @@ simeth_device_event(struct notifier_block *this,unsigned long event, void *ptr)
 		return NOTIFY_DONE;
 	}
 
-	if (dev->nd_net != &init_net)
+	if (dev_net(dev) != &init_net)
 		return NOTIFY_DONE;
 
 	if ( event != NETDEV_UP && event != NETDEV_DOWN ) return NOTIFY_DONE;
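
This hunk, like the matching one in drivers/block/aoe/aoenet.c further down, replaces direct access to dev->nd_net with the dev_net() accessor that arrived with network-namespace support. A minimal sketch of the pattern in a netdevice notifier, with hypothetical names (in this era of the kernel the notifier's data pointer is the net_device itself):

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <net/net_namespace.h>

/*
 * Ignore devices that do not live in the initial network namespace.
 * dev_net() reduces to &init_net when CONFIG_NET_NS is disabled, so
 * the check costs nothing in that configuration.
 */
static int foo_device_event(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (dev_net(dev) != &init_net)
		return NOTIFY_DONE;

	/* ... handle NETDEV_UP / NETDEV_DOWN for devices in init_net ... */
	return NOTIFY_DONE;
}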

+ 1 - 1
arch/powerpc/platforms/82xx/ep8248e.c

@@ -138,7 +138,7 @@ static int __devinit ep8248e_mdio_probe(struct of_device *ofdev,
 
 	bus->name = "ep8248e-mdio-bitbang";
 	bus->dev = &ofdev->dev;
-	bus->id = res.start;
+	snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
 
 	return mdiobus_register(bus);
 }

+ 1 - 1
arch/powerpc/platforms/pasemi/gpio_mdio.c

@@ -241,7 +241,7 @@ static int __devinit gpio_mdio_probe(struct of_device *ofdev,
 	new_bus->reset = &gpio_mdio_reset;
 
 	prop = of_get_property(np, "reg", NULL);
-	new_bus->id = *prop;
+	snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", *prop);
 	new_bus->priv = priv;
 
 	new_bus->phy_mask = 0;

+ 3 - 2
arch/powerpc/sysdev/fsl_soc.c

@@ -341,7 +341,7 @@ static int __init gfar_of_init(void)
 				goto unreg;
 			}
 
-			gfar_data.bus_id = 0;
+			snprintf(gfar_data.bus_id, MII_BUS_ID_SIZE, "0");
 			gfar_data.phy_id = fixed_link[0];
 		} else {
 			phy = of_find_node_by_phandle(*ph);
@@ -362,7 +362,8 @@ static int __init gfar_of_init(void)
 			}
 
 			gfar_data.phy_id = *id;
-			gfar_data.bus_id = res.start;
+			snprintf(gfar_data.bus_id, MII_BUS_ID_SIZE, "%x",
+					res.start);
 
 			of_node_put(phy);
 			of_node_put(mdio);
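
The three powerpc hunks above (ep8248e.c, gpio_mdio.c and fsl_soc.c) all follow from the same phylib change: the MII bus identifier is now a fixed-size string rather than an integer, so callers format the bus number into a MII_BUS_ID_SIZE buffer with snprintf(). A minimal sketch of the pattern, with a hypothetical helper name:

#include <linux/kernel.h>
#include <linux/phy.h>

/*
 * bus->id used to be an int; it is now char id[MII_BUS_ID_SIZE], so a
 * (hypothetical) register base address is formatted into it as hex,
 * matching the hunks above.
 */
static void foo_set_mdio_bus_id(struct mii_bus *bus, u32 addr)
{
	snprintf(bus->id, MII_BUS_ID_SIZE, "%x", addr);
}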

+ 3 - 5
arch/s390/defconfig

@@ -538,11 +538,9 @@ CONFIG_CTC=m
 # CONFIG_SMSGIUCV is not set
 # CONFIG_CLAW is not set
 CONFIG_QETH=y
-
-#
-# Gigabit Ethernet default settings
-#
-# CONFIG_QETH_IPV6 is not set
+CONFIG_QETH_L2=y
+CONFIG_QETH_L3=y
+CONFIG_QETH_IPV6=y
 CONFIG_CCWGROUP=y
 # CONFIG_PPP is not set
 # CONFIG_SLIP is not set

+ 11 - 11
drivers/atm/ambassador.c

@@ -437,7 +437,7 @@ static inline void dump_skb (char * prefix, unsigned int vc, struct sk_buff * sk
 
 /* see limitations under Hardware Features */
 
-static inline int check_area (void * start, size_t length) {
+static int check_area (void * start, size_t length) {
   // assumes length > 0
   const u32 fourmegmask = -1 << 22;
   const u32 twofivesixmask = -1 << 8;
@@ -456,7 +456,7 @@ static inline int check_area (void * start, size_t length) {
 
 /********** free an skb (as per ATM device driver documentation) **********/
 
-static inline void amb_kfree_skb (struct sk_buff * skb) {
+static void amb_kfree_skb (struct sk_buff * skb) {
   if (ATM_SKB(skb)->vcc->pop) {
     ATM_SKB(skb)->vcc->pop (ATM_SKB(skb)->vcc, skb);
   } else {
@@ -466,7 +466,7 @@ static inline void amb_kfree_skb (struct sk_buff * skb) {
 
 /********** TX completion **********/
 
-static inline void tx_complete (amb_dev * dev, tx_out * tx) {
+static void tx_complete (amb_dev * dev, tx_out * tx) {
   tx_simple * tx_descr = bus_to_virt (tx->handle);
   struct sk_buff * skb = tx_descr->skb;
   
@@ -643,7 +643,7 @@ static int command_do (amb_dev * dev, command * cmd) {
 
 /********** TX queue pair **********/
 
-static inline int tx_give (amb_dev * dev, tx_in * tx) {
+static int tx_give (amb_dev * dev, tx_in * tx) {
   amb_txq * txq = &dev->txq;
   unsigned long flags;
   
@@ -675,7 +675,7 @@ static inline int tx_give (amb_dev * dev, tx_in * tx) {
   }
 }
 
-static inline int tx_take (amb_dev * dev) {
+static int tx_take (amb_dev * dev) {
   amb_txq * txq = &dev->txq;
   unsigned long flags;
   
@@ -703,7 +703,7 @@ static inline int tx_take (amb_dev * dev) {
 
 /********** RX queue pairs **********/
 
-static inline int rx_give (amb_dev * dev, rx_in * rx, unsigned char pool) {
+static int rx_give (amb_dev * dev, rx_in * rx, unsigned char pool) {
   amb_rxq * rxq = &dev->rxq[pool];
   unsigned long flags;
   
@@ -728,7 +728,7 @@ static inline int rx_give (amb_dev * dev, rx_in * rx, unsigned char pool) {
   }
 }
 
-static inline int rx_take (amb_dev * dev, unsigned char pool) {
+static int rx_take (amb_dev * dev, unsigned char pool) {
   amb_rxq * rxq = &dev->rxq[pool];
   unsigned long flags;
   
@@ -761,7 +761,7 @@ static inline int rx_take (amb_dev * dev, unsigned char pool) {
 /********** RX Pool handling **********/
 
 /* pre: buffers_wanted = 0, post: pending = 0 */
-static inline void drain_rx_pool (amb_dev * dev, unsigned char pool) {
+static void drain_rx_pool (amb_dev * dev, unsigned char pool) {
   amb_rxq * rxq = &dev->rxq[pool];
   
   PRINTD (DBG_FLOW|DBG_POOL, "drain_rx_pool %p %hu", dev, pool);
@@ -796,7 +796,7 @@ static void drain_rx_pools (amb_dev * dev) {
     drain_rx_pool (dev, pool);
 }
 
-static inline void fill_rx_pool (amb_dev * dev, unsigned char pool,
+static void fill_rx_pool (amb_dev * dev, unsigned char pool,
                                  gfp_t priority)
 {
   rx_in rx;
@@ -846,7 +846,7 @@ static void fill_rx_pools (amb_dev * dev) {
 
 /********** enable host interrupts **********/
 
-static inline void interrupts_on (amb_dev * dev) {
+static void interrupts_on (amb_dev * dev) {
   wr_plain (dev, offsetof(amb_mem, interrupt_control),
 	    rd_plain (dev, offsetof(amb_mem, interrupt_control))
 	    | AMB_INTERRUPT_BITS);
@@ -854,7 +854,7 @@ static inline void interrupts_on (amb_dev * dev) {
 
 /********** disable host interrupts **********/
 
-static inline void interrupts_off (amb_dev * dev) {
+static void interrupts_off (amb_dev * dev) {
   wr_plain (dev, offsetof(amb_mem, interrupt_control),
 	    rd_plain (dev, offsetof(amb_mem, interrupt_control))
 	    &~ AMB_INTERRUPT_BITS);

+ 12 - 12
drivers/atm/horizon.c

@@ -424,7 +424,7 @@ static inline void FLUSH_RX_CHANNEL (hrz_dev * dev, u16 channel) {
   return;
 }
 
-static inline void WAIT_FLUSH_RX_COMPLETE (hrz_dev * dev) {
+static void WAIT_FLUSH_RX_COMPLETE (hrz_dev * dev) {
   while (rd_regw (dev, RX_CHANNEL_PORT_OFF) & FLUSH_CHANNEL)
     ;
   return;
@@ -435,7 +435,7 @@ static inline void SELECT_RX_CHANNEL (hrz_dev * dev, u16 channel) {
   return;
 }
 
-static inline void WAIT_UPDATE_COMPLETE (hrz_dev * dev) {
+static void WAIT_UPDATE_COMPLETE (hrz_dev * dev) {
   while (rd_regw (dev, RX_CHANNEL_PORT_OFF) & RX_CHANNEL_UPDATE_IN_PROGRESS)
     ;
   return;
@@ -796,7 +796,7 @@ static void hrz_change_vc_qos (ATM_RXER * rxer, MAAL_QOS * qos) {
 
 /********** free an skb (as per ATM device driver documentation) **********/
 
-static inline void hrz_kfree_skb (struct sk_buff * skb) {
+static void hrz_kfree_skb (struct sk_buff * skb) {
   if (ATM_SKB(skb)->vcc->pop) {
     ATM_SKB(skb)->vcc->pop (ATM_SKB(skb)->vcc, skb);
   } else {
@@ -1076,7 +1076,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
 
 /********** handle RX bus master complete events **********/
 
-static inline void rx_bus_master_complete_handler (hrz_dev * dev) {
+static void rx_bus_master_complete_handler (hrz_dev * dev) {
   if (test_bit (rx_busy, &dev->flags)) {
     rx_schedule (dev, 1);
   } else {
@@ -1089,7 +1089,7 @@ static inline void rx_bus_master_complete_handler (hrz_dev * dev) {
 
 /********** (queue to) become the next TX thread **********/
 
-static inline int tx_hold (hrz_dev * dev) {
+static int tx_hold (hrz_dev * dev) {
   PRINTD (DBG_TX, "sleeping at tx lock %p %lu", dev, dev->flags);
   wait_event_interruptible(dev->tx_queue, (!test_and_set_bit(tx_busy, &dev->flags)));
   PRINTD (DBG_TX, "woken at tx lock %p %lu", dev, dev->flags);
@@ -1232,7 +1232,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
 
 /********** handle TX bus master complete events **********/
 
-static inline void tx_bus_master_complete_handler (hrz_dev * dev) {
+static void tx_bus_master_complete_handler (hrz_dev * dev) {
   if (test_bit (tx_busy, &dev->flags)) {
     tx_schedule (dev, 1);
   } else {
@@ -1246,7 +1246,7 @@ static inline void tx_bus_master_complete_handler (hrz_dev * dev) {
 /********** move RX Q pointer to next item in circular buffer **********/
 
 // called only from IRQ sub-handler
-static inline u32 rx_queue_entry_next (hrz_dev * dev) {
+static u32 rx_queue_entry_next (hrz_dev * dev) {
   u32 rx_queue_entry;
   spin_lock (&dev->mem_lock);
   rx_queue_entry = rd_mem (dev, &dev->rx_q_entry->entry);
@@ -1270,7 +1270,7 @@ static inline void rx_disabled_handler (hrz_dev * dev) {
 /********** handle RX data received by device **********/
 
 // called from IRQ handler
-static inline void rx_data_av_handler (hrz_dev * dev) {
+static void rx_data_av_handler (hrz_dev * dev) {
   u32 rx_queue_entry;
   u32 rx_queue_entry_flags;
   u16 rx_len;
@@ -1394,7 +1394,7 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id)
   irq_ok = 0;
   while ((int_source = rd_regl (dev, INT_SOURCE_REG_OFF)
 	  & INTERESTING_INTERRUPTS)) {
-    // In the interests of fairness, the (inline) handlers below are
+    // In the interests of fairness, the handlers below are
     // called in sequence and without immediate return to the head of
     // the while loop. This is only of issue for slow hosts (or when
     // debugging messages are on). Really slow hosts may find a fast
@@ -1458,7 +1458,7 @@ static void do_housekeeping (unsigned long arg) {
 /********** find an idle channel for TX and set it up **********/
 
 // called with tx_busy set
-static inline short setup_idle_tx_channel (hrz_dev * dev, hrz_vcc * vcc) {
+static short setup_idle_tx_channel (hrz_dev * dev, hrz_vcc * vcc) {
   unsigned short idle_channels;
   short tx_channel = -1;
   unsigned int spin_count;
@@ -1777,13 +1777,13 @@ static void hrz_reset (const hrz_dev * dev) {
 
 /********** read the burnt in address **********/
 
-static inline void WRITE_IT_WAIT (const hrz_dev *dev, u32 ctrl)
+static void WRITE_IT_WAIT (const hrz_dev *dev, u32 ctrl)
 {
 	wr_regl (dev, CONTROL_0_REG, ctrl);
 	udelay (5);
 }
   
-static inline void CLOCK_IT (const hrz_dev *dev, u32 ctrl)
+static void CLOCK_IT (const hrz_dev *dev, u32 ctrl)
 {
 	// DI must be valid around rising SK edge
 	WRITE_IT_WAIT(dev, ctrl & ~SEEPROM_SK);

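Note: the ambassador and horizon hunks above drop the `inline` keyword from file-local helpers. Because the functions are `static` and visible only inside their own translation unit, the compiler remains free to inline them where its heuristics say it is worthwhile; removing the keyword simply stops forcing that decision at every call site. A minimal sketch of the pattern, using a hypothetical `reg_poll` helper rather than the real driver routines:

	/* Before: inlining is requested unconditionally. */
	static inline void reg_poll_forced(volatile unsigned long *reg, unsigned long mask)
	{
		while (*reg & mask)
			;	/* spin until the hardware clears the bits */
	}

	/* After: still static, still an inlining candidate, but the compiler
	 * makes the call based on its own cost model. */
	static void reg_poll(volatile unsigned long *reg, unsigned long mask)
	{
		while (*reg & mask)
			;
	}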
+ 1 - 1
drivers/block/aoe/aoenet.c

@@ -115,7 +115,7 @@ aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt,
 	struct aoe_hdr *h;
 	u32 n;
 
-	if (ifp->nd_net != &init_net)
+	if (dev_net(ifp) != &init_net)
 		goto exit;
 
 	skb = skb_share_check(skb, GFP_ATOMIC);

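Note: the aoenet change swaps a direct `ifp->nd_net` dereference for the `dev_net()` accessor, so the only place that needs to know whether a per-device namespace pointer exists at all is the helper itself. A rough userspace-flavoured sketch of why such an accessor helps; the structure layout below is illustrative only, not the real `struct net_device`:

	struct net { int id; };
	static struct net init_net = { 0 };

	struct net_device {
		const char *name;
	#ifdef CONFIG_NET_NS
		struct net *nd_net;	/* per-device namespace pointer */
	#endif
	};

	/* Callers always go through the accessor; the #ifdef lives in
	 * exactly one place instead of at every dereference. */
	static inline struct net *dev_net(const struct net_device *dev)
	{
	#ifdef CONFIG_NET_NS
		return dev->nd_net;
	#else
		(void)dev;
		return &init_net;
	#endif
	}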
+ 372 - 357
drivers/net/3c509.c

@@ -54,25 +54,24 @@
 		v1.19a 28Oct2002 Davud Ruggiero <jdr@farfalle.com>
 			- Increase *read_eeprom udelay to workaround oops with 2 cards.
 		v1.19b 08Nov2002 Marc Zyngier <maz@wild-wind.fr.eu.org>
-		    - Introduce driver model for EISA cards.
+			- Introduce driver model for EISA cards.
+		v1.20  04Feb2008 Ondrej Zary <linux@rainbow-software.org>
+			- convert to isa_driver and pnp_driver and some cleanups
 */
 
 #define DRV_NAME	"3c509"
-#define DRV_VERSION	"1.19b"
-#define DRV_RELDATE	"08Nov2002"
+#define DRV_VERSION	"1.20"
+#define DRV_RELDATE	"04Feb2008"
 
 /* A few values that may be tweaked. */
 
 /* Time in jiffies before concluding the transmitter is hung. */
 #define TX_TIMEOUT  (400*HZ/1000)
-/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
-static int max_interrupt_work = 10;
 
 #include <linux/module.h>
-#ifdef CONFIG_MCA
 #include <linux/mca.h>
-#endif
-#include <linux/isapnp.h>
+#include <linux/isa.h>
+#include <linux/pnp.h>
 #include <linux/string.h>
 #include <linux/interrupt.h>
 #include <linux/errno.h>
@@ -97,10 +96,6 @@ static int max_interrupt_work = 10;
 
 static char version[] __initdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n";
 
-#if defined(CONFIG_PM) && (defined(CONFIG_MCA) || defined(CONFIG_EISA))
-#define EL3_SUSPEND
-#endif
-
 #ifdef EL3_DEBUG
 static int el3_debug = EL3_DEBUG;
 #else
@@ -111,6 +106,7 @@ static int el3_debug = 2;
  * a global variable so that the mca/eisa probe routines can increment
  * it */
 static int el3_cards = 0;
+#define EL3_MAX_CARDS 8
 
 /* To minimize the size of the driver source I only define operating
    constants if they are used several times.  You'll need the manual
@@ -119,7 +115,7 @@ static int el3_cards = 0;
 #define EL3_DATA 0x00
 #define EL3_CMD 0x0e
 #define EL3_STATUS 0x0e
-#define	 EEPROM_READ 0x80
+#define	EEPROM_READ 0x80
 
 #define EL3_IO_EXTENT	16
 
@@ -168,23 +164,31 @@ enum RxFilter {
  */
 #define SKB_QUEUE_SIZE	64
 
+enum el3_cardtype { EL3_ISA, EL3_PNP, EL3_MCA, EL3_EISA };
+
 struct el3_private {
 	struct net_device_stats stats;
-	struct net_device *next_dev;
 	spinlock_t lock;
 	/* skb send-queue */
 	int head, size;
 	struct sk_buff *queue[SKB_QUEUE_SIZE];
-	enum {
-		EL3_MCA,
-		EL3_PNP,
-		EL3_EISA,
-	} type;						/* type of device */
-	struct device *dev;
+	enum el3_cardtype type;
 };
-static int id_port __initdata = 0x110;	/* Start with 0x110 to avoid new sound cards.*/
-static struct net_device *el3_root_dev;
+static int id_port;
+static int current_tag;
+static struct net_device *el3_devs[EL3_MAX_CARDS];
+
+/* Parameters that may be passed into the module. */
+static int debug = -1;
+static int irq[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 10;
+#ifdef CONFIG_PNP
+static int nopnp;
+#endif
 
+static int __init el3_common_init(struct net_device *dev);
+static void el3_common_remove(struct net_device *dev);
 static ushort id_read_eeprom(int index);
 static ushort read_eeprom(int ioaddr, int index);
 static int el3_open(struct net_device *dev);
@@ -199,7 +203,7 @@ static void el3_tx_timeout (struct net_device *dev);
 static void el3_down(struct net_device *dev);
 static void el3_up(struct net_device *dev);
 static const struct ethtool_ops ethtool_ops;
-#ifdef EL3_SUSPEND
+#ifdef CONFIG_PM
 static int el3_suspend(struct device *, pm_message_t);
 static int el3_resume(struct device *);
 #else
@@ -209,13 +213,272 @@ static int el3_resume(struct device *);
 
 
 /* generic device remove for all device types */
-#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
 static int el3_device_remove (struct device *device);
-#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void el3_poll_controller(struct net_device *dev);
 #endif
 
+/* Return 0 on success, 1 on error, 2 when found already detected PnP card */
+static int el3_isa_id_sequence(__be16 *phys_addr)
+{
+	short lrs_state = 0xff;
+	int i;
+
+	/* ISA boards are detected by sending the ID sequence to the
+	   ID_PORT.  We find cards past the first by setting the 'current_tag'
+	   on cards as they are found.  Cards with their tag set will not
+	   respond to subsequent ID sequences. */
+
+	outb(0x00, id_port);
+	outb(0x00, id_port);
+	for (i = 0; i < 255; i++) {
+		outb(lrs_state, id_port);
+		lrs_state <<= 1;
+		lrs_state = lrs_state & 0x100 ? lrs_state ^ 0xcf : lrs_state;
+	}
+	/* For the first probe, clear all board's tag registers. */
+	if (current_tag == 0)
+		outb(0xd0, id_port);
+	else			/* Otherwise kill off already-found boards. */
+		outb(0xd8, id_port);
+	if (id_read_eeprom(7) != 0x6d50)
+		return 1;
+	/* Read in EEPROM data, which does contention-select.
+	   Only the lowest address board will stay "on-line".
+	   3Com got the byte order backwards. */
+	for (i = 0; i < 3; i++)
+		phys_addr[i] = htons(id_read_eeprom(i));
+#ifdef CONFIG_PNP
+	if (!nopnp) {
+		/* The ISA PnP 3c509 cards respond to the ID sequence too.
+		   This check is needed in order not to register them twice. */
+		for (i = 0; i < el3_cards; i++) {
+			struct el3_private *lp = netdev_priv(el3_devs[i]);
+			if (lp->type == EL3_PNP
+			    && !memcmp(phys_addr, el3_devs[i]->dev_addr,
+				       ETH_ALEN)) {
+				if (el3_debug > 3)
+					printk(KERN_DEBUG "3c509 with address %02x %02x %02x %02x %02x %02x was found by ISAPnP\n",
+						phys_addr[0] & 0xff, phys_addr[0] >> 8,
+						phys_addr[1] & 0xff, phys_addr[1] >> 8,
+						phys_addr[2] & 0xff, phys_addr[2] >> 8);
+				/* Set the adaptor tag so that the next card can be found. */
+				outb(0xd0 + ++current_tag, id_port);
+				return 2;
+			}
+		}
+	}
+#endif /* CONFIG_PNP */
+	return 0;
+
+}
+
+static void __devinit el3_dev_fill(struct net_device *dev, __be16 *phys_addr,
+				   int ioaddr, int irq, int if_port,
+				   enum el3_cardtype type)
+{
+	struct el3_private *lp = netdev_priv(dev);
+
+	memcpy(dev->dev_addr, phys_addr, ETH_ALEN);
+	dev->base_addr = ioaddr;
+	dev->irq = irq;
+	dev->if_port = if_port;
+	lp->type = type;
+}
+
+static int __devinit el3_isa_match(struct device *pdev,
+				   unsigned int ndev)
+{
+	struct net_device *dev;
+	int ioaddr, isa_irq, if_port, err;
+	unsigned int iobase;
+	__be16 phys_addr[3];
+
+	while ((err = el3_isa_id_sequence(phys_addr)) == 2)
+		;	/* Skip to next card when PnP card found */
+	if (err == 1)
+		return 0;
+
+	iobase = id_read_eeprom(8);
+	if_port = iobase >> 14;
+	ioaddr = 0x200 + ((iobase & 0x1f) << 4);
+	if (irq[el3_cards] > 1 && irq[el3_cards] < 16)
+		isa_irq = irq[el3_cards];
+	else
+		isa_irq = id_read_eeprom(9) >> 12;
+
+	dev = alloc_etherdev(sizeof(struct el3_private));
+	if (!dev)
+		return -ENOMEM;
+
+	netdev_boot_setup_check(dev);
+
+	if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-isa")) {
+		free_netdev(dev);
+		return 0;
+	}
+
+	/* Set the adaptor tag so that the next card can be found. */
+	outb(0xd0 + ++current_tag, id_port);
+
+	/* Activate the adaptor at the EEPROM location. */
+	outb((ioaddr >> 4) | 0xe0, id_port);
+
+	EL3WINDOW(0);
+	if (inw(ioaddr) != 0x6d50) {
+		free_netdev(dev);
+		return 0;
+	}
+
+	/* Free the interrupt so that some other card can use it. */
+	outw(0x0f00, ioaddr + WN0_IRQ);
+
+	el3_dev_fill(dev, phys_addr, ioaddr, isa_irq, if_port, EL3_ISA);
+	dev_set_drvdata(pdev, dev);
+	if (el3_common_init(dev)) {
+		free_netdev(dev);
+		return 0;
+	}
+
+	el3_devs[el3_cards++] = dev;
+	return 1;
+}
+
+static int __devexit el3_isa_remove(struct device *pdev,
+				    unsigned int ndev)
+{
+	el3_device_remove(pdev);
+	dev_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int el3_isa_suspend(struct device *dev, unsigned int n,
+			   pm_message_t state)
+{
+	current_tag = 0;
+	return el3_suspend(dev, state);
+}
+
+static int el3_isa_resume(struct device *dev, unsigned int n)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+	int ioaddr = ndev->base_addr, err;
+	__be16 phys_addr[3];
+
+	while ((err = el3_isa_id_sequence(phys_addr)) == 2)
+		;	/* Skip to next card when PnP card found */
+	if (err == 1)
+		return 0;
+	/* Set the adaptor tag so that the next card can be found. */
+	outb(0xd0 + ++current_tag, id_port);
+	/* Enable the card */
+	outb((ioaddr >> 4) | 0xe0, id_port);
+	EL3WINDOW(0);
+	if (inw(ioaddr) != 0x6d50)
+		return 1;
+	/* Free the interrupt so that some other card can use it. */
+	outw(0x0f00, ioaddr + WN0_IRQ);
+	return el3_resume(dev);
+}
+#endif
+
+static struct isa_driver el3_isa_driver = {
+	.match		= el3_isa_match,
+	.remove		= __devexit_p(el3_isa_remove),
+#ifdef CONFIG_PM
+	.suspend	= el3_isa_suspend,
+	.resume		= el3_isa_resume,
+#endif
+	.driver		= {
+		.name	= "3c509"
+	},
+};
+static int isa_registered;
+
+#ifdef CONFIG_PNP
+static struct pnp_device_id el3_pnp_ids[] = {
+	{ .id = "TCM5090" }, /* 3Com Etherlink III (TP) */
+	{ .id = "TCM5091" }, /* 3Com Etherlink III */
+	{ .id = "TCM5094" }, /* 3Com Etherlink III (combo) */
+	{ .id = "TCM5095" }, /* 3Com Etherlink III (TPO) */
+	{ .id = "TCM5098" }, /* 3Com Etherlink III (TPC) */
+	{ .id = "PNP80f7" }, /* 3Com Etherlink III compatible */
+	{ .id = "PNP80f8" }, /* 3Com Etherlink III compatible */
+	{ .id = "" }
+};
+MODULE_DEVICE_TABLE(pnp, el3_pnp_ids);
+
+static int __devinit el3_pnp_probe(struct pnp_dev *pdev,
+				    const struct pnp_device_id *id)
+{
+	short i;
+	int ioaddr, irq, if_port;
+	u16 phys_addr[3];
+	struct net_device *dev = NULL;
+	int err;
+
+	ioaddr = pnp_port_start(pdev, 0);
+	if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-pnp"))
+		return -EBUSY;
+	irq = pnp_irq(pdev, 0);
+	EL3WINDOW(0);
+	for (i = 0; i < 3; i++)
+		phys_addr[i] = htons(read_eeprom(ioaddr, i));
+	if_port = read_eeprom(ioaddr, 8) >> 14;
+	dev = alloc_etherdev(sizeof(struct el3_private));
+	if (!dev) {
+		release_region(ioaddr, EL3_IO_EXTENT);
+		return -ENOMEM;
+	}
+	SET_NETDEV_DEV(dev, &pdev->dev);
+	netdev_boot_setup_check(dev);
+
+	el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_PNP);
+	pnp_set_drvdata(pdev, dev);
+	err = el3_common_init(dev);
+
+	if (err) {
+		pnp_set_drvdata(pdev, NULL);
+		free_netdev(dev);
+		return err;
+	}
+
+	el3_devs[el3_cards++] = dev;
+	return 0;
+}
+
+static void __devexit el3_pnp_remove(struct pnp_dev *pdev)
+{
+	el3_common_remove(pnp_get_drvdata(pdev));
+	pnp_set_drvdata(pdev, NULL);
+}
+
+#ifdef CONFIG_PM
+static int el3_pnp_suspend(struct pnp_dev *pdev, pm_message_t state)
+{
+	return el3_suspend(&pdev->dev, state);
+}
+
+static int el3_pnp_resume(struct pnp_dev *pdev)
+{
+	return el3_resume(&pdev->dev);
+}
+#endif
+
+static struct pnp_driver el3_pnp_driver = {
+	.name		= "3c509",
+	.id_table	= el3_pnp_ids,
+	.probe		= el3_pnp_probe,
+	.remove		= __devexit_p(el3_pnp_remove),
+#ifdef CONFIG_PM
+	.suspend	= el3_pnp_suspend,
+	.resume		= el3_pnp_resume,
+#endif
+};
+static int pnp_registered;
+#endif /* CONFIG_PNP */
+
 #ifdef CONFIG_EISA
 static struct eisa_device_id el3_eisa_ids[] = {
 		{ "TCM5092" },
@@ -230,13 +493,14 @@ static int el3_eisa_probe (struct device *device);
 static struct eisa_driver el3_eisa_driver = {
 		.id_table = el3_eisa_ids,
 		.driver   = {
-				.name    = "3c509",
+				.name    = "3c579",
 				.probe   = el3_eisa_probe,
 				.remove  = __devexit_p (el3_device_remove),
 				.suspend = el3_suspend,
 				.resume  = el3_resume,
 		}
 };
+static int eisa_registered;
 #endif
 
 #ifdef CONFIG_MCA
@@ -271,45 +535,9 @@ static struct mca_driver el3_mca_driver = {
 				.resume  = el3_resume,
 		},
 };
+static int mca_registered;
 #endif /* CONFIG_MCA */
 
-#if defined(__ISAPNP__)
-static struct isapnp_device_id el3_isapnp_adapters[] __initdata = {
-	{	ISAPNP_ANY_ID, ISAPNP_ANY_ID,
-		ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5090),
-		(long) "3Com Etherlink III (TP)" },
-	{	ISAPNP_ANY_ID, ISAPNP_ANY_ID,
-		ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5091),
-		(long) "3Com Etherlink III" },
-	{	ISAPNP_ANY_ID, ISAPNP_ANY_ID,
-		ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5094),
-		(long) "3Com Etherlink III (combo)" },
-	{	ISAPNP_ANY_ID, ISAPNP_ANY_ID,
-		ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5095),
-		(long) "3Com Etherlink III (TPO)" },
-	{	ISAPNP_ANY_ID, ISAPNP_ANY_ID,
-		ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5098),
-		(long) "3Com Etherlink III (TPC)" },
-	{	ISAPNP_ANY_ID, ISAPNP_ANY_ID,
-		ISAPNP_VENDOR('P', 'N', 'P'), ISAPNP_FUNCTION(0x80f7),
-		(long) "3Com Etherlink III compatible" },
-	{	ISAPNP_ANY_ID, ISAPNP_ANY_ID,
-		ISAPNP_VENDOR('P', 'N', 'P'), ISAPNP_FUNCTION(0x80f8),
-		(long) "3Com Etherlink III compatible" },
-	{ }	/* terminate list */
-};
-
-static __be16 el3_isapnp_phys_addr[8][3];
-static int nopnp;
-#endif /* __ISAPNP__ */
-
-/* With the driver model introduction for EISA devices, both init
- * and cleanup have been split :
- * - EISA devices probe/remove starts in el3_eisa_probe/el3_device_remove
- * - MCA/ISA still use el3_probe
- *
- * Both call el3_common_init/el3_common_remove. */
-
 static int __init el3_common_init(struct net_device *dev)
 {
 	struct el3_private *lp = netdev_priv(dev);
@@ -360,231 +588,11 @@ static int __init el3_common_init(struct net_device *dev)
 
 static void el3_common_remove (struct net_device *dev)
 {
-	struct el3_private *lp = netdev_priv(dev);
-
-	(void) lp;				/* Keep gcc quiet... */
-#if defined(__ISAPNP__)
-	if (lp->type == EL3_PNP)
-		pnp_device_detach(to_pnp_dev(lp->dev));
-#endif
-
 	unregister_netdev (dev);
 	release_region(dev->base_addr, EL3_IO_EXTENT);
 	free_netdev (dev);
 }
 
-static int __init el3_probe(int card_idx)
-{
-	struct net_device *dev;
-	struct el3_private *lp;
-	short lrs_state = 0xff, i;
-	int ioaddr, irq, if_port;
-	__be16 phys_addr[3];
-	static int current_tag;
-	int err = -ENODEV;
-#if defined(__ISAPNP__)
-	static int pnp_cards;
-	struct pnp_dev *idev = NULL;
-	int pnp_found = 0;
-
-	if (nopnp == 1)
-		goto no_pnp;
-
-	for (i=0; el3_isapnp_adapters[i].vendor != 0; i++) {
-		int j;
-		while ((idev = pnp_find_dev(NULL,
-					    el3_isapnp_adapters[i].vendor,
-					    el3_isapnp_adapters[i].function,
-					    idev))) {
-			if (pnp_device_attach(idev) < 0)
-				continue;
-			if (pnp_activate_dev(idev) < 0) {
-__again:
-				pnp_device_detach(idev);
-				continue;
-			}
-			if (!pnp_port_valid(idev, 0) || !pnp_irq_valid(idev, 0))
-				goto __again;
-			ioaddr = pnp_port_start(idev, 0);
-			if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509 PnP")) {
-				pnp_device_detach(idev);
-				return -EBUSY;
-			}
-			irq = pnp_irq(idev, 0);
-			if (el3_debug > 3)
-				printk ("ISAPnP reports %s at i/o 0x%x, irq %d\n",
-					(char*) el3_isapnp_adapters[i].driver_data, ioaddr, irq);
-			EL3WINDOW(0);
-			for (j = 0; j < 3; j++)
-				el3_isapnp_phys_addr[pnp_cards][j] =
-					phys_addr[j] =
-						htons(read_eeprom(ioaddr, j));
-			if_port = read_eeprom(ioaddr, 8) >> 14;
-			dev = alloc_etherdev(sizeof (struct el3_private));
-			if (!dev) {
-					release_region(ioaddr, EL3_IO_EXTENT);
-					pnp_device_detach(idev);
-					return -ENOMEM;
-			}
-
-			SET_NETDEV_DEV(dev, &idev->dev);
-			pnp_cards++;
-
-			netdev_boot_setup_check(dev);
-			pnp_found = 1;
-			goto found;
-		}
-	}
-no_pnp:
-#endif /* __ISAPNP__ */
-
-	/* Select an open I/O location at 0x1*0 to do contention select. */
-	for ( ; id_port < 0x200; id_port += 0x10) {
-		if (!request_region(id_port, 1, "3c509"))
-			continue;
-		outb(0x00, id_port);
-		outb(0xff, id_port);
-		if (inb(id_port) & 0x01){
-			release_region(id_port, 1);
-			break;
-		} else
-			release_region(id_port, 1);
-	}
-	if (id_port >= 0x200) {
-		/* Rare -- do we really need a warning? */
-		printk(" WARNING: No I/O port available for 3c509 activation.\n");
-		return -ENODEV;
-	}
-
-	/* Next check for all ISA bus boards by sending the ID sequence to the
-	   ID_PORT.  We find cards past the first by setting the 'current_tag'
-	   on cards as they are found.  Cards with their tag set will not
-	   respond to subsequent ID sequences. */
-
-	outb(0x00, id_port);
-	outb(0x00, id_port);
-	for(i = 0; i < 255; i++) {
-		outb(lrs_state, id_port);
-		lrs_state <<= 1;
-		lrs_state = lrs_state & 0x100 ? lrs_state ^ 0xcf : lrs_state;
-	}
-
-	/* For the first probe, clear all board's tag registers. */
-	if (current_tag == 0)
-		outb(0xd0, id_port);
-	else				/* Otherwise kill off already-found boards. */
-		outb(0xd8, id_port);
-
-	if (id_read_eeprom(7) != 0x6d50) {
-		return -ENODEV;
-	}
-
-	/* Read in EEPROM data, which does contention-select.
-	   Only the lowest address board will stay "on-line".
-	   3Com got the byte order backwards. */
-	for (i = 0; i < 3; i++) {
-		phys_addr[i] = htons(id_read_eeprom(i));
-	}
-
-#if defined(__ISAPNP__)
-	if (nopnp == 0) {
-		/* The ISA PnP 3c509 cards respond to the ID sequence.
-		   This check is needed in order not to register them twice. */
-		for (i = 0; i < pnp_cards; i++) {
-			if (phys_addr[0] == el3_isapnp_phys_addr[i][0] &&
-			    phys_addr[1] == el3_isapnp_phys_addr[i][1] &&
-			    phys_addr[2] == el3_isapnp_phys_addr[i][2])
-			{
-				if (el3_debug > 3)
-					printk("3c509 with address %02x %02x %02x %02x %02x %02x was found by ISAPnP\n",
-						phys_addr[0] & 0xff, phys_addr[0] >> 8,
-						phys_addr[1] & 0xff, phys_addr[1] >> 8,
-						phys_addr[2] & 0xff, phys_addr[2] >> 8);
-				/* Set the adaptor tag so that the next card can be found. */
-				outb(0xd0 + ++current_tag, id_port);
-				goto no_pnp;
-			}
-		}
-	}
-#endif /* __ISAPNP__ */
-
-	{
-		unsigned int iobase = id_read_eeprom(8);
-		if_port = iobase >> 14;
-		ioaddr = 0x200 + ((iobase & 0x1f) << 4);
-	}
-	irq = id_read_eeprom(9) >> 12;
-
-	dev = alloc_etherdev(sizeof (struct el3_private));
-	if (!dev)
-		return -ENOMEM;
-
-	netdev_boot_setup_check(dev);
-
-	/* Set passed-in IRQ or I/O Addr. */
-	if (dev->irq > 1  &&  dev->irq < 16)
-			irq = dev->irq;
-
-	if (dev->base_addr) {
-		if (dev->mem_end == 0x3c509 	/* Magic key */
-		    && dev->base_addr >= 0x200  &&  dev->base_addr <= 0x3e0)
-			ioaddr = dev->base_addr & 0x3f0;
-		else if (dev->base_addr != ioaddr)
-			goto out;
-	}
-
-	if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509")) {
-		err = -EBUSY;
-		goto out;
-	}
-
-	/* Set the adaptor tag so that the next card can be found. */
-	outb(0xd0 + ++current_tag, id_port);
-
-	/* Activate the adaptor at the EEPROM location. */
-	outb((ioaddr >> 4) | 0xe0, id_port);
-
-	EL3WINDOW(0);
-	if (inw(ioaddr) != 0x6d50)
-		goto out1;
-
-	/* Free the interrupt so that some other card can use it. */
-	outw(0x0f00, ioaddr + WN0_IRQ);
-
-#if defined(__ISAPNP__)
- found:							/* PNP jumps here... */
-#endif /* __ISAPNP__ */
-
-	memcpy(dev->dev_addr, phys_addr, sizeof(phys_addr));
-	dev->base_addr = ioaddr;
-	dev->irq = irq;
-	dev->if_port = if_port;
-	lp = netdev_priv(dev);
-#if defined(__ISAPNP__)
-	lp->dev = &idev->dev;
-	if (pnp_found)
-		lp->type = EL3_PNP;
-#endif
-	err = el3_common_init(dev);
-
-	if (err)
-		goto out1;
-
-	el3_cards++;
-	lp->next_dev = el3_root_dev;
-	el3_root_dev = dev;
-	return 0;
-
-out1:
-#if defined(__ISAPNP__)
-	if (idev)
-		pnp_device_detach(idev);
-#endif
-out:
-	free_netdev(dev);
-	return err;
-}
-
 #ifdef CONFIG_MCA
 static int __init el3_mca_probe(struct device *device)
 {
@@ -596,7 +604,6 @@ static int __init el3_mca_probe(struct device *device)
 	 * redone for multi-card detection by ZP Gu (zpg@castle.net)
 	 * now works as a module */
 
-	struct el3_private *lp;
 	short i;
 	int ioaddr, irq, if_port;
 	u16 phys_addr[3];
@@ -613,7 +620,7 @@ static int __init el3_mca_probe(struct device *device)
 	irq = pos5 & 0x0f;
 
 
-	printk("3c529: found %s at slot %d\n",
+	printk(KERN_INFO "3c529: found %s at slot %d\n",
 		   el3_mca_adapter_names[mdev->index], slot + 1);
 
 	/* claim the slot */
@@ -626,7 +633,7 @@ static int __init el3_mca_probe(struct device *device)
 	irq = mca_device_transform_irq(mdev, irq);
 	ioaddr = mca_device_transform_ioport(mdev, ioaddr);
 	if (el3_debug > 2) {
-			printk("3c529: irq %d  ioaddr 0x%x  ifport %d\n", irq, ioaddr, if_port);
+			printk(KERN_DEBUG "3c529: irq %d  ioaddr 0x%x  ifport %d\n", irq, ioaddr, if_port);
 	}
 	EL3WINDOW(0);
 	for (i = 0; i < 3; i++) {
@@ -641,13 +648,7 @@ static int __init el3_mca_probe(struct device *device)
 
 	netdev_boot_setup_check(dev);
 
-	memcpy(dev->dev_addr, phys_addr, sizeof(phys_addr));
-	dev->base_addr = ioaddr;
-	dev->irq = irq;
-	dev->if_port = if_port;
-	lp = netdev_priv(dev);
-	lp->dev = device;
-	lp->type = EL3_MCA;
+	el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_MCA);
 	device->driver_data = dev;
 	err = el3_common_init(dev);
 
@@ -657,7 +658,7 @@ static int __init el3_mca_probe(struct device *device)
 		return -ENOMEM;
 	}
 
-	el3_cards++;
+	el3_devs[el3_cards++] = dev;
 	return 0;
 }
 
@@ -666,7 +667,6 @@ static int __init el3_mca_probe(struct device *device)
 #ifdef CONFIG_EISA
 static int __init el3_eisa_probe (struct device *device)
 {
-	struct el3_private *lp;
 	short i;
 	int ioaddr, irq, if_port;
 	u16 phys_addr[3];
@@ -678,7 +678,7 @@ static int __init el3_eisa_probe (struct device *device)
 	edev = to_eisa_device (device);
 	ioaddr = edev->base_addr;
 
-	if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509"))
+	if (!request_region(ioaddr, EL3_IO_EXTENT, "3c579-eisa"))
 		return -EBUSY;
 
 	/* Change the register set to the configuration window 0. */
@@ -700,13 +700,7 @@ static int __init el3_eisa_probe (struct device *device)
 
 	netdev_boot_setup_check(dev);
 
-	memcpy(dev->dev_addr, phys_addr, sizeof(phys_addr));
-	dev->base_addr = ioaddr;
-	dev->irq = irq;
-	dev->if_port = if_port;
-	lp = netdev_priv(dev);
-	lp->dev = device;
-	lp->type = EL3_EISA;
+	el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_EISA);
 	eisa_set_drvdata (edev, dev);
 	err = el3_common_init(dev);
 
@@ -716,12 +710,11 @@ static int __init el3_eisa_probe (struct device *device)
 		return err;
 	}
 
-	el3_cards++;
+	el3_devs[el3_cards++] = dev;
 	return 0;
 }
 #endif
 
-#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
 /* This remove works for all device types.
  *
  * The net dev must be stored in the driver_data field */
@@ -734,7 +727,6 @@ static int __devexit el3_device_remove (struct device *device)
 	el3_common_remove (dev);
 	return 0;
 }
-#endif
 
 /* Read a word from the EEPROM using the regular EEPROM access register.
    Assume that we are in register window zero.
@@ -749,7 +741,7 @@ static ushort read_eeprom(int ioaddr, int index)
 }
 
 /* Read a word from the EEPROM when in the ISA ID probe state. */
-static ushort __init id_read_eeprom(int index)
+static ushort id_read_eeprom(int index)
 {
 	int bit, word = 0;
 
@@ -765,7 +757,7 @@ static ushort __init id_read_eeprom(int index)
 		word = (word << 1) + (inb(id_port) & 0x01);
 
 	if (el3_debug > 3)
-		printk("  3c509 EEPROM word %d %#4.4x.\n", index, word);
+		printk(KERN_DEBUG "  3c509 EEPROM word %d %#4.4x.\n", index, word);
 
 	return word;
 }
@@ -787,13 +779,13 @@ el3_open(struct net_device *dev)
 
 	EL3WINDOW(0);
 	if (el3_debug > 3)
-		printk("%s: Opening, IRQ %d	 status@%x %4.4x.\n", dev->name,
+		printk(KERN_DEBUG "%s: Opening, IRQ %d	 status@%x %4.4x.\n", dev->name,
 			   dev->irq, ioaddr + EL3_STATUS, inw(ioaddr + EL3_STATUS));
 
 	el3_up(dev);
 
 	if (el3_debug > 3)
-		printk("%s: Opened 3c509  IRQ %d  status %4.4x.\n",
+		printk(KERN_DEBUG "%s: Opened 3c509  IRQ %d  status %4.4x.\n",
 			   dev->name, dev->irq, inw(ioaddr + EL3_STATUS));
 
 	return 0;
@@ -806,7 +798,7 @@ el3_tx_timeout (struct net_device *dev)
 	int ioaddr = dev->base_addr;
 
 	/* Transmitter timeout, serious problems. */
-	printk("%s: transmit timed out, Tx_status %2.2x status %4.4x "
+	printk(KERN_WARNING "%s: transmit timed out, Tx_status %2.2x status %4.4x "
 		   "Tx FIFO room %d.\n",
 		   dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS),
 		   inw(ioaddr + TX_FREE));
@@ -831,7 +823,7 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	lp->stats.tx_bytes += skb->len;
 
 	if (el3_debug > 4) {
-		printk("%s: el3_start_xmit(length = %u) called, status %4.4x.\n",
+		printk(KERN_DEBUG "%s: el3_start_xmit(length = %u) called, status %4.4x.\n",
 			   dev->name, skb->len, inw(ioaddr + EL3_STATUS));
 	}
 #if 0
@@ -840,7 +832,7 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		ushort status = inw(ioaddr + EL3_STATUS);
 		if (status & 0x0001 		/* IRQ line active, missed one. */
 			&& inw(ioaddr + EL3_STATUS) & 1) { 			/* Make sure. */
-			printk("%s: Missed interrupt, status then %04x now %04x"
+			printk(KERN_DEBUG "%s: Missed interrupt, status then %04x now %04x"
 				   "  Tx %2.2x Rx %4.4x.\n", dev->name, status,
 				   inw(ioaddr + EL3_STATUS), inb(ioaddr + TX_STATUS),
 				   inw(ioaddr + RX_STATUS));
@@ -914,7 +906,7 @@ el3_interrupt(int irq, void *dev_id)
 
 	if (el3_debug > 4) {
 		status = inw(ioaddr + EL3_STATUS);
-		printk("%s: interrupt, status %4.4x.\n", dev->name, status);
+		printk(KERN_DEBUG "%s: interrupt, status %4.4x.\n", dev->name, status);
 	}
 
 	while ((status = inw(ioaddr + EL3_STATUS)) &
@@ -925,7 +917,7 @@ el3_interrupt(int irq, void *dev_id)
 
 		if (status & TxAvailable) {
 			if (el3_debug > 5)
-				printk("	TX room bit was handled.\n");
+				printk(KERN_DEBUG "	TX room bit was handled.\n");
 			/* There's room in the FIFO for a full-sized packet. */
 			outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
 			netif_wake_queue (dev);
@@ -964,7 +956,7 @@ el3_interrupt(int irq, void *dev_id)
 		}
 
 		if (--i < 0) {
-			printk("%s: Infinite loop in interrupt, status %4.4x.\n",
+			printk(KERN_ERR "%s: Infinite loop in interrupt, status %4.4x.\n",
 				   dev->name, status);
 			/* Clear all interrupts. */
 			outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
@@ -975,7 +967,7 @@ el3_interrupt(int irq, void *dev_id)
 	}
 
 	if (el3_debug > 4) {
-		printk("%s: exiting interrupt, status %4.4x.\n", dev->name,
+		printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n", dev->name,
 			   inw(ioaddr + EL3_STATUS));
 	}
 	spin_unlock(&lp->lock);
@@ -1450,7 +1442,7 @@ el3_up(struct net_device *dev)
 }
 
 /* Power Management support functions */
-#ifdef EL3_SUSPEND
+#ifdef CONFIG_PM
 
 static int
 el3_suspend(struct device *pdev, pm_message_t state)
@@ -1500,79 +1492,102 @@ el3_resume(struct device *pdev)
 	return 0;
 }
 
-#endif /* EL3_SUSPEND */
-
-/* Parameters that may be passed into the module. */
-static int debug = -1;
-static int irq[] = {-1, -1, -1, -1, -1, -1, -1, -1};
-static int xcvr[] = {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};
+#endif /* CONFIG_PM */
 
 module_param(debug,int, 0);
 module_param_array(irq, int, NULL, 0);
-module_param_array(xcvr, int, NULL, 0);
 module_param(max_interrupt_work, int, 0);
 MODULE_PARM_DESC(debug, "debug level (0-6)");
 MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
-MODULE_PARM_DESC(xcvr,"transceiver(s) (0=internal, 1=external)");
 MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt");
-#if defined(__ISAPNP__)
+#ifdef CONFIG_PNP
 module_param(nopnp, int, 0);
 MODULE_PARM_DESC(nopnp, "disable ISA PnP support (0-1)");
-MODULE_DEVICE_TABLE(isapnp, el3_isapnp_adapters);
-#endif	/* __ISAPNP__ */
-MODULE_DESCRIPTION("3Com Etherlink III (3c509, 3c509B) ISA/PnP ethernet driver");
+#endif	/* CONFIG_PNP */
+MODULE_DESCRIPTION("3Com Etherlink III (3c509, 3c509B, 3c529, 3c579) ethernet driver");
 MODULE_LICENSE("GPL");
 
 static int __init el3_init_module(void)
 {
 	int ret = 0;
-	el3_cards = 0;
 
 	if (debug >= 0)
 		el3_debug = debug;
 
-	el3_root_dev = NULL;
-	while (el3_probe(el3_cards) == 0) {
-		if (irq[el3_cards] > 1)
-			el3_root_dev->irq = irq[el3_cards];
-		if (xcvr[el3_cards] >= 0)
-			el3_root_dev->if_port = xcvr[el3_cards];
-		el3_cards++;
+#ifdef CONFIG_PNP
+	if (!nopnp) {
+		ret = pnp_register_driver(&el3_pnp_driver);
+		if (!ret)
+			pnp_registered = 1;
+	}
+#endif
+	/* Select an open I/O location at 0x1*0 to do ISA contention select. */
+	/* Start with 0x110 to avoid some sound cards.*/
+	for (id_port = 0x110 ; id_port < 0x200; id_port += 0x10) {
+		if (!request_region(id_port, 1, "3c509-control"))
+			continue;
+		outb(0x00, id_port);
+		outb(0xff, id_port);
+		if (inb(id_port) & 0x01)
+			break;
+		else
+			release_region(id_port, 1);
+	}
+	if (id_port >= 0x200) {
+		id_port = 0;
+		printk(KERN_ERR "No I/O port available for 3c509 activation.\n");
+	} else {
+		ret = isa_register_driver(&el3_isa_driver, EL3_MAX_CARDS);
+		if (!ret)
+			isa_registered = 1;
 	}
-
 #ifdef CONFIG_EISA
 	ret = eisa_driver_register(&el3_eisa_driver);
+	if (!ret)
+		eisa_registered = 1;
 #endif
 #ifdef CONFIG_MCA
-	{
-		int err = mca_register_driver(&el3_mca_driver);
-		if (ret == 0)
-			ret = err;
-	}
+	ret = mca_register_driver(&el3_mca_driver);
+	if (!ret)
+		mca_registered = 1;
+#endif
+
+#ifdef CONFIG_PNP
+	if (pnp_registered)
+		ret = 0;
+#endif
+	if (isa_registered)
+		ret = 0;
+#ifdef CONFIG_EISA
+	if (eisa_registered)
+		ret = 0;
+#endif
+#ifdef CONFIG_MCA
+	if (mca_registered)
+		ret = 0;
 #endif
 	return ret;
 }
 
 static void __exit el3_cleanup_module(void)
 {
-	struct net_device *next_dev;
-
-	while (el3_root_dev) {
-		struct el3_private *lp = netdev_priv(el3_root_dev);
-
-		next_dev = lp->next_dev;
-		el3_common_remove (el3_root_dev);
-		el3_root_dev = next_dev;
-	}
-
+#ifdef CONFIG_PNP
+	if (pnp_registered)
+		pnp_unregister_driver(&el3_pnp_driver);
+#endif
+	if (isa_registered)
+		isa_unregister_driver(&el3_isa_driver);
+	if (id_port)
+		release_region(id_port, 1);
 #ifdef CONFIG_EISA
-	eisa_driver_unregister (&el3_eisa_driver);
+	if (eisa_registered)
+		eisa_driver_unregister(&el3_eisa_driver);
 #endif
 #ifdef CONFIG_MCA
-	mca_unregister_driver(&el3_mca_driver);
+	if (mca_registered)
+		mca_unregister_driver(&el3_mca_driver);
 #endif
 }
 
 module_init (el3_init_module);
 module_exit (el3_cleanup_module);
-

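Note: the 3c509 rework above retires the hand-rolled `el3_probe()` loop in favour of the generic `isa_driver` (and, for PnP cards, `pnp_driver`) infrastructure: the bus code now calls the driver back once per possible unit or matching PnP id, and the driver only allocates a netdev when hardware actually answers. A stripped-down sketch of the ISA side as it looked in this era, with hypothetical `foo_*` names standing in for the real el3 callbacks:

	#include <linux/module.h>
	#include <linux/isa.h>

	#define FOO_MAX_CARDS 8

	/* Returning non-zero tells the ISA core this unit is present; the
	 * 3c509 driver does all of its detection and setup in here. */
	static int foo_isa_match(struct device *pdev, unsigned int ndev)
	{
		/* probe hardware for unit 'ndev'; on success allocate a netdev,
		 * register it and stash it with dev_set_drvdata(pdev, netdev) */
		return 0;	/* nothing found in this sketch */
	}

	static int foo_isa_remove(struct device *pdev, unsigned int ndev)
	{
		/* unregister and free whatever foo_isa_match() set up */
		dev_set_drvdata(pdev, NULL);
		return 0;
	}

	static struct isa_driver foo_isa_driver = {
		.match	= foo_isa_match,
		.remove	= foo_isa_remove,
		.driver	= { .name = "foo-isa" },
	};

	static int __init foo_init(void)
	{
		/* the core calls foo_isa_match() up to FOO_MAX_CARDS times */
		return isa_register_driver(&foo_isa_driver, FOO_MAX_CARDS);
	}

	static void __exit foo_exit(void)
	{
		isa_unregister_driver(&foo_isa_driver);
	}

	module_init(foo_init);
	module_exit(foo_exit);
	MODULE_LICENSE("GPL");

The PnP side follows the same shape with a `struct pnp_driver`, an id table and `pnp_register_driver()`, as the hunk above shows.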
+ 5 - 5
drivers/net/8139too.c

@@ -966,8 +966,8 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
 
 	addr_len = read_eeprom (ioaddr, 0, 8) == 0x8129 ? 8 : 6;
 	for (i = 0; i < 3; i++)
-		((u16 *) (dev->dev_addr))[i] =
-		    le16_to_cpu (read_eeprom (ioaddr, i + 7, addr_len));
+		((__le16 *) (dev->dev_addr))[i] =
+		    cpu_to_le16(read_eeprom (ioaddr, i + 7, addr_len));
 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
 	/* The Rtl8139-specific entries in the device structure. */
@@ -1373,8 +1373,8 @@ static void rtl8139_hw_start (struct net_device *dev)
 	/* unlock Config[01234] and BMCR register writes */
 	RTL_W8_F (Cfg9346, Cfg9346_Unlock);
 	/* Restore our idea of the MAC address. */
-	RTL_W32_F (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0)));
-	RTL_W32_F (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));
+	RTL_W32_F (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
+	RTL_W32_F (MAC0 + 4, le16_to_cpu (*(__le16 *) (dev->dev_addr + 4)));
 
 	/* Must enable Tx/Rx before setting transfer thresholds! */
 	RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);
@@ -1945,7 +1945,7 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
 		rmb();
 
 		/* read size+status of next frame from DMA ring buffer */
-		rx_status = le32_to_cpu (*(u32 *) (rx_ring + ring_offset));
+		rx_status = le32_to_cpu (*(__le32 *) (rx_ring + ring_offset));
 		rx_size = rx_status >> 16;
 		pkt_size = rx_size - 4;
 

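Note: the 8139too hunks replace bare `u16`/`u32` casts with `__le16`/`__le32` plus matching `le16_to_cpu()`/`cpu_to_le16()` conversions. On little-endian hardware the generated code is unchanged; the gain is that sparse can now flag byte-order mistakes and big-endian builds perform the swap explicitly. A small standalone illustration of the conversion the EEPROM path performs (plain C, without the sparse annotations; `le16_to_host` and the sample bytes are hypothetical):

	#include <stdint.h>
	#include <stdio.h>

	/* Interpret two bytes stored in little-endian order as a host-order
	 * value, regardless of the host's own endianness. */
	static uint16_t le16_to_host(const uint8_t *p)
	{
		return (uint16_t)(p[0] | (p[1] << 8));
	}

	int main(void)
	{
		const uint8_t eeprom_word[2] = { 0x29, 0x81 };	/* 0x8129 on the wire */
		printf("%#x\n", (unsigned)le16_to_host(eeprom_word));	/* prints 0x8129 */
		return 0;
	}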
+ 4 - 2
drivers/net/8390.c

@@ -48,14 +48,16 @@ EXPORT_SYMBOL(__alloc_ei_netdev);
 
 #if defined(MODULE)
 
-int init_module(void)
+static int __init ns8390_module_init(void)
 {
 	return 0;
 }
 
-void cleanup_module(void)
+static void __exit ns8390_module_exit(void)
 {
 }
 
+module_init(ns8390_module_init);
+module_exit(ns8390_module_exit);
 #endif /* MODULE */
 MODULE_LICENSE("GPL");

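Note: this 8390 hunk, like the cops, com20020, at1700 and atarilance hunks further down, replaces the legacy `init_module()`/`cleanup_module()` entry points with static functions wired up through `module_init()`/`module_exit()`, which also lets them carry `__init`/`__exit` section annotations. A minimal skeleton of the resulting shape, with placeholder `bar_` names:

	#include <linux/module.h>
	#include <linux/init.h>

	static int __init bar_module_init(void)
	{
		/* device probing / registration would go here */
		return 0;
	}

	static void __exit bar_module_exit(void)
	{
		/* undo whatever bar_module_init() registered */
	}

	module_init(bar_module_init);
	module_exit(bar_module_exit);
	MODULE_LICENSE("GPL");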
+ 8 - 88
drivers/net/Kconfig

@@ -467,6 +467,13 @@ config SNI_82596
 	  Say Y here to support the on-board Intel 82596 ethernet controller
 	  built into SNI RM machines.
 
+config KORINA
+	tristate "Korina (IDT RC32434) Ethernet support"
+	depends on NET_ETHERNET && MIKROTIK_RB500
+	help
+	  If you have a Mikrotik RouterBoard 500 or IDT RC32434
+	  based system say Y. Otherwise say N.
+
 config MIPS_JAZZ_SONIC
 	tristate "MIPS JAZZ onboard SONIC Ethernet support"
 	depends on MACH_JAZZ
@@ -1431,7 +1438,7 @@ config CS89x0
 config TC35815
 	tristate "TOSHIBA TC35815 Ethernet support"
 	depends on NET_PCI && PCI && MIPS
-	select MII
+	select PHYLIB
 
 config EEPRO100
 	tristate "EtherExpressPro/100 support (eepro100, original Becker driver)"
@@ -2220,93 +2227,6 @@ config SKY2_DEBUG
 
 	 If unsure, say N.
 
-config SK98LIN
-	tristate "Marvell Yukon Chipset / SysKonnect SK-98xx Support (DEPRECATED)"
-	depends on PCI
-	---help---
-	  Say Y here if you have a Marvell Yukon or SysKonnect SK-98xx/SK-95xx
-	  compliant Gigabit Ethernet Adapter.
-
-	  This driver supports the original Yukon chipset. This driver is
-	  deprecated and will be removed from the kernel in the near future,
-	  it has been replaced by the skge driver. skge is cleaner and
-	  seems to work better.
-
-	  This driver does not support the newer Yukon2 chipset. A separate
-	  driver, sky2, is provided to support Yukon2-based adapters.
-
-	  The following adapters are supported by this driver:
-	    - 3Com 3C940 Gigabit LOM Ethernet Adapter
-	    - 3Com 3C941 Gigabit LOM Ethernet Adapter
-	    - Allied Telesyn AT-2970LX Gigabit Ethernet Adapter
-	    - Allied Telesyn AT-2970LX/2SC Gigabit Ethernet Adapter
-	    - Allied Telesyn AT-2970SX Gigabit Ethernet Adapter
-	    - Allied Telesyn AT-2970SX/2SC Gigabit Ethernet Adapter
-	    - Allied Telesyn AT-2970TX Gigabit Ethernet Adapter
-	    - Allied Telesyn AT-2970TX/2TX Gigabit Ethernet Adapter
-	    - Allied Telesyn AT-2971SX Gigabit Ethernet Adapter
-	    - Allied Telesyn AT-2971T Gigabit Ethernet Adapter
-	    - Belkin Gigabit Desktop Card 10/100/1000Base-T Adapter, Copper RJ-45
-	    - EG1032 v2 Instant Gigabit Network Adapter
-	    - EG1064 v2 Instant Gigabit Network Adapter
-	    - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Abit)
-	    - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Albatron)
-	    - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Asus)
-	    - Marvell 88E8001 Gigabit LOM Ethernet Adapter (ECS)
-	    - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Epox)
-	    - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Foxconn)
-	    - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Gigabyte)
-	    - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Iwill)
-	    - Marvell 88E8050 Gigabit LOM Ethernet Adapter (Intel)
-	    - Marvell RDK-8001 Adapter
-	    - Marvell RDK-8002 Adapter
-	    - Marvell RDK-8003 Adapter
-	    - Marvell RDK-8004 Adapter
-	    - Marvell RDK-8006 Adapter
-	    - Marvell RDK-8007 Adapter
-	    - Marvell RDK-8008 Adapter
-	    - Marvell RDK-8009 Adapter
-	    - Marvell RDK-8010 Adapter
-	    - Marvell RDK-8011 Adapter
-	    - Marvell RDK-8012 Adapter
-	    - Marvell RDK-8052 Adapter
-	    - Marvell Yukon Gigabit Ethernet 10/100/1000Base-T Adapter (32 bit)
-	    - Marvell Yukon Gigabit Ethernet 10/100/1000Base-T Adapter (64 bit)
-	    - N-Way PCI-Bus Giga-Card 1000/100/10Mbps(L)
-	    - SK-9521 10/100/1000Base-T Adapter
-	    - SK-9521 V2.0 10/100/1000Base-T Adapter
-	    - SK-9821 Gigabit Ethernet Server Adapter (SK-NET GE-T)
-	    - SK-9821 V2.0 Gigabit Ethernet 10/100/1000Base-T Adapter
-	    - SK-9822 Gigabit Ethernet Server Adapter (SK-NET GE-T dual link)
-	    - SK-9841 Gigabit Ethernet Server Adapter (SK-NET GE-LX)
-	    - SK-9841 V2.0 Gigabit Ethernet 1000Base-LX Adapter
-	    - SK-9842 Gigabit Ethernet Server Adapter (SK-NET GE-LX dual link)
-	    - SK-9843 Gigabit Ethernet Server Adapter (SK-NET GE-SX)
-	    - SK-9843 V2.0 Gigabit Ethernet 1000Base-SX Adapter
-	    - SK-9844 Gigabit Ethernet Server Adapter (SK-NET GE-SX dual link)
-	    - SK-9851 V2.0 Gigabit Ethernet 1000Base-SX Adapter
-	    - SK-9861 Gigabit Ethernet Server Adapter (SK-NET GE-SX Volition)
-	    - SK-9861 V2.0 Gigabit Ethernet 1000Base-SX Adapter
-	    - SK-9862 Gigabit Ethernet Server Adapter (SK-NET GE-SX Volition dual link)
-	    - SK-9871 Gigabit Ethernet Server Adapter (SK-NET GE-ZX)
-	    - SK-9871 V2.0 Gigabit Ethernet 1000Base-ZX Adapter
-	    - SK-9872 Gigabit Ethernet Server Adapter (SK-NET GE-ZX dual link)
-	    - SMC EZ Card 1000 (SMC9452TXV.2)
-	  
-	  The adapters support Jumbo Frames.
-	  The dual link adapters support link-failover and dual port features.
-	  Both Marvell Yukon and SysKonnect SK-98xx/SK-95xx adapters support 
-	  the scatter-gather functionality with sendfile(). Please refer to 
-	  <file:Documentation/networking/sk98lin.txt> for more information about
-	  optional driver parameters.
-	  Questions concerning this driver may be addressed to:
-	      <linux@syskonnect.de>
-	  
-	  If you want to compile this driver as a module ( = code which can be
-	  inserted in and removed from the running kernel whenever you want),
-	  say M here and read <file:Documentation/kbuild/modules.txt>. The module will
-	  be called sk98lin. This is recommended.
-
 config VIA_VELOCITY
 	tristate "VIA Velocity support"
 	depends on PCI

+ 2 - 2
drivers/net/Makefile

@@ -15,7 +15,7 @@ obj-$(CONFIG_CHELSIO_T3) += cxgb3/
 obj-$(CONFIG_EHEA) += ehea/
 obj-$(CONFIG_CAN) += can/
 obj-$(CONFIG_BONDING) += bonding/
-obj-$(CONFIG_ATL1) += atl1/
+obj-$(CONFIG_ATL1) += atlx/
 obj-$(CONFIG_GIANFAR) += gianfar_driver.o
 obj-$(CONFIG_TEHUTI) += tehuti.o
 
@@ -75,7 +75,6 @@ ps3_gelic-objs += ps3_gelic_net.o $(gelic_wireless-y)
 obj-$(CONFIG_TC35815) += tc35815.o
 obj-$(CONFIG_SKGE) += skge.o
 obj-$(CONFIG_SKY2) += sky2.o
-obj-$(CONFIG_SK98LIN) += sk98lin/
 obj-$(CONFIG_SKFP) += skfp/
 obj-$(CONFIG_VIA_RHINE) += via-rhine.o
 obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
@@ -191,6 +190,7 @@ obj-$(CONFIG_ZORRO8390) += zorro8390.o
 obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
 obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
 obj-$(CONFIG_EQUALIZER) += eql.o
+obj-$(CONFIG_KORINA) += korina.o
 obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o
 obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o
 obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o

+ 4 - 2
drivers/net/appletalk/cops.c

@@ -1010,7 +1010,7 @@ module_param(io, int, 0);
 module_param(irq, int, 0);
 module_param(board_type, int, 0);
 
-int __init init_module(void)
+static int __init cops_module_init(void)
 {
 	if (io == 0)
 		printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n",
@@ -1021,12 +1021,14 @@ int __init init_module(void)
         return 0;
 }
 
-void __exit cleanup_module(void)
+static void __exit cops_module_exit(void)
 {
 	unregister_netdev(cops_dev);
 	cleanup_card(cops_dev);
 	free_netdev(cops_dev);
 }
+module_init(cops_module_init);
+module_exit(cops_module_exit);
 #endif /* MODULE */
 
 /*

+ 3 - 2
drivers/net/arcnet/arcnet.c

@@ -940,7 +940,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
 
 			/* is the RECON info empty or old? */
 			if (!lp->first_recon || !lp->last_recon ||
-			    jiffies - lp->last_recon > HZ * 10) {
+			    time_after(jiffies, lp->last_recon + HZ * 10)) {
 				if (lp->network_down)
 					BUGMSG(D_NORMAL, "reconfiguration detected: cabling restored?\n");
 				lp->first_recon = lp->last_recon = jiffies;
@@ -974,7 +974,8 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
 					lp->num_recons = 1;
 				}
 			}
-		} else if (lp->network_down && jiffies - lp->last_recon > HZ * 10) {
+		} else if (lp->network_down &&
+				time_after(jiffies, lp->last_recon + HZ * 10)) {
 			if (lp->network_down)
 				BUGMSG(D_NORMAL, "cabling restored?\n");
 			lp->first_recon = lp->last_recon = 0;

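Note: the arcnet hunk converts the open-coded jiffies arithmetic to `time_after()`, the kernel's canonical wrap-safe comparison. It subtracts the two counters and tests the sign, so the check keeps working after the jiffies counter wraps, where a comparison of raw counter values would not. A self-contained userspace sketch of the idiom; the macro mirrors the kernel definition, and `HZ` is assumed to be 100 here:

	#include <stdio.h>

	typedef unsigned long jiffies_t;
	#define HZ 100UL

	/* Mirrors the kernel definition: true if a is later than b, even
	 * when the counter has wrapped in between. */
	#define time_after(a, b)  ((long)((b) - (a)) < 0)

	int main(void)
	{
		jiffies_t last_recon = (jiffies_t)-1500;	/* counter close to wrapping  */
		jiffies_t deadline   = last_recon + 10 * HZ;	/* still a huge raw value     */
		jiffies_t now        = last_recon + 20 * HZ;	/* has wrapped past zero      */

		printf("raw '>':    %d\n", now > deadline);		/* 0 -- wrong answer */
		printf("time_after: %d\n", time_after(now, deadline));	/* 1 -- wrap-safe    */
		return 0;
	}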
+ 4 - 3
drivers/net/arcnet/com20020.c

@@ -348,14 +348,15 @@ MODULE_LICENSE("GPL");
 
 #ifdef MODULE
 
-int init_module(void)
+static int __init com20020_module_init(void)
 {
 	BUGLVL(D_NORMAL) printk(VERSION);
 	return 0;
 }
 
-void cleanup_module(void)
+static void __exit com20020_module_exit(void)
 {
 }
-
+module_init(com20020_module_init);
+module_exit(com20020_module_exit);
 #endif				/* MODULE */

+ 4 - 3
drivers/net/at1700.c

@@ -881,7 +881,7 @@ MODULE_PARM_DESC(io, "AT1700/FMV18X I/O base address");
 MODULE_PARM_DESC(irq, "AT1700/FMV18X IRQ number");
 MODULE_PARM_DESC(net_debug, "AT1700/FMV18X debug level (0-6)");
 
-int __init init_module(void)
+static int __init at1700_module_init(void)
 {
 	if (io == 0)
 		printk("at1700: You should not use auto-probing with insmod!\n");
@@ -891,13 +891,14 @@ int __init init_module(void)
 	return 0;
 }
 
-void __exit
-cleanup_module(void)
+static void __exit at1700_module_exit(void)
 {
 	unregister_netdev(dev_at1700);
 	cleanup_card(dev_at1700);
 	free_netdev(dev_at1700);
 }
+module_init(at1700_module_init);
+module_exit(at1700_module_exit);
 #endif /* MODULE */
 MODULE_LICENSE("GPL");
 

+ 4 - 3
drivers/net/atarilance.c

@@ -1155,7 +1155,7 @@ static int lance_set_mac_address( struct net_device *dev, void *addr )
 #ifdef MODULE
 static struct net_device *atarilance_dev;
 
-int __init init_module(void)
+static int __init atarilance_module_init(void)
 {
 	atarilance_dev = atarilance_probe(-1);
 	if (IS_ERR(atarilance_dev))
@@ -1163,13 +1163,14 @@ int __init init_module(void)
 	return 0;
 }
 
-void __exit cleanup_module(void)
+static void __exit atarilance_module_exit(void)
 {
 	unregister_netdev(atarilance_dev);
 	free_irq(atarilance_dev->irq, atarilance_dev);
 	free_netdev(atarilance_dev);
 }
-
+module_init(atarilance_module_init);
+module_exit(atarilance_module_exit);
 #endif /* MODULE */
 
 

+ 0 - 2
drivers/net/atl1/Makefile

@@ -1,2 +0,0 @@
-obj-$(CONFIG_ATL1)	+= atl1.o
-atl1-y			+= atl1_main.o atl1_hw.o atl1_ethtool.o atl1_param.o

+ 0 - 286
drivers/net/atl1/atl1.h

@@ -1,286 +0,0 @@
-/*
- * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
- * Copyright(c) 2006 Chris Snook <csnook@redhat.com>
- * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
- *
- * Derived from Intel e1000 driver
- * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- */
-
-#ifndef _ATL1_H_
-#define _ATL1_H_
-
-#include <linux/types.h>
-#include <linux/if_vlan.h>
-
-#include "atl1_hw.h"
-
-/* function prototypes needed by multiple files */
-s32 atl1_up(struct atl1_adapter *adapter);
-void atl1_down(struct atl1_adapter *adapter);
-int atl1_reset(struct atl1_adapter *adapter);
-s32 atl1_setup_ring_resources(struct atl1_adapter *adapter);
-void atl1_free_ring_resources(struct atl1_adapter *adapter);
-
-extern char atl1_driver_name[];
-extern char atl1_driver_version[];
-extern const struct ethtool_ops atl1_ethtool_ops;
-
-struct atl1_adapter;
-
-#define ATL1_MAX_INTR		3
-#define ATL1_MAX_TX_BUF_LEN	0x3000	/* 12288 bytes */
-
-#define ATL1_DEFAULT_TPD	256
-#define ATL1_MAX_TPD		1024
-#define ATL1_MIN_TPD		64
-#define ATL1_DEFAULT_RFD	512
-#define ATL1_MIN_RFD		128
-#define ATL1_MAX_RFD		2048
-
-#define ATL1_GET_DESC(R, i, type)	(&(((type *)((R)->desc))[i]))
-#define ATL1_RFD_DESC(R, i)	ATL1_GET_DESC(R, i, struct rx_free_desc)
-#define ATL1_TPD_DESC(R, i)	ATL1_GET_DESC(R, i, struct tx_packet_desc)
-#define ATL1_RRD_DESC(R, i)	ATL1_GET_DESC(R, i, struct rx_return_desc)
-
-/*
- * This detached comment is preserved for documentation purposes only.
- * It was originally attached to some code that got deleted, but seems
- * important enough to keep around...
- *
- * <begin detached comment>
- * Some workarounds require millisecond delays and are run during interrupt
- * context.  Most notably, when establishing link, the phy may need tweaking
- * but cannot process phy register reads/writes faster than millisecond
- * intervals...and we establish link due to a "link status change" interrupt.
- * <end detached comment>
- */
-
-/*
- * atl1_ring_header represents a single, contiguous block of DMA space
- * mapped for the three descriptor rings (tpd, rfd, rrd) and the two
- * message blocks (cmb, smb) described below
- */
-struct atl1_ring_header {
-	void *desc;		/* virtual address */
-	dma_addr_t dma;		/* physical address*/
-	unsigned int size;	/* length in bytes */
-};
-
-/*
- * atl1_buffer is wrapper around a pointer to a socket buffer
- * so a DMA handle can be stored along with the skb
- */
-struct atl1_buffer {
-	struct sk_buff *skb;	/* socket buffer */
-	u16 length;		/* rx buffer length */
-	u16 alloced;		/* 1 if skb allocated */
-	dma_addr_t dma;
-};
-
-/* transmit packet descriptor (tpd) ring */
-struct atl1_tpd_ring {
-	void *desc;		/* descriptor ring virtual address */
-	dma_addr_t dma;		/* descriptor ring physical address */
-	u16 size;		/* descriptor ring length in bytes */
-	u16 count;		/* number of descriptors in the ring */
-	u16 hw_idx;		/* hardware index */
-	atomic_t next_to_clean;
-	atomic_t next_to_use;
-	struct atl1_buffer *buffer_info;
-};
-
-/* receive free descriptor (rfd) ring */
-struct atl1_rfd_ring {
-	void *desc;		/* descriptor ring virtual address */
-	dma_addr_t dma;		/* descriptor ring physical address */
-	u16 size;		/* descriptor ring length in bytes */
-	u16 count;		/* number of descriptors in the ring */
-	atomic_t next_to_use;
-	u16 next_to_clean;
-	struct atl1_buffer *buffer_info;
-};
-
-/* receive return descriptor (rrd) ring */
-struct atl1_rrd_ring {
-	void *desc;		/* descriptor ring virtual address */
-	dma_addr_t dma;		/* descriptor ring physical address */
-	unsigned int size;	/* descriptor ring length in bytes */
-	u16 count;		/* number of descriptors in the ring */
-	u16 next_to_use;
-	atomic_t next_to_clean;
-};
-
-/* coalescing message block (cmb) */
-struct atl1_cmb {
-	struct coals_msg_block *cmb;
-	dma_addr_t dma;
-};
-
-/* statistics message block (smb) */
-struct atl1_smb {
-	struct stats_msg_block *smb;
-	dma_addr_t dma;
-};
-
-/* Statistics counters */
-struct atl1_sft_stats {
-	u64 rx_packets;
-	u64 tx_packets;
-	u64 rx_bytes;
-	u64 tx_bytes;
-	u64 multicast;
-	u64 collisions;
-	u64 rx_errors;
-	u64 rx_length_errors;
-	u64 rx_crc_errors;
-	u64 rx_frame_errors;
-	u64 rx_fifo_errors;
-	u64 rx_missed_errors;
-	u64 tx_errors;
-	u64 tx_fifo_errors;
-	u64 tx_aborted_errors;
-	u64 tx_window_errors;
-	u64 tx_carrier_errors;
-	u64 tx_pause;		/* num pause packets transmitted. */
-	u64 excecol;		/* num tx packets w/ excessive collisions. */
-	u64 deffer;		/* num tx packets deferred */
-	u64 scc;		/* num packets subsequently transmitted
-				 * successfully w/ single prior collision. */
-	u64 mcc;		/* num packets subsequently transmitted
-				 * successfully w/ multiple prior collisions. */
-	u64 latecol;		/* num tx packets  w/ late collisions. */
-	u64 tx_underun;		/* num tx packets aborted due to transmit
-				 * FIFO underrun, or TRD FIFO underrun */
-	u64 tx_trunc;		/* num tx packets truncated due to size
-				 * exceeding MTU, regardless whether truncated
-				 * by the chip or not. (The name doesn't really
-				 * reflect the meaning in this case.) */
-	u64 rx_pause;		/* num Pause packets received. */
-	u64 rx_rrd_ov;
-	u64 rx_trunc;
-};
-
-/* hardware structure */
-struct atl1_hw {
-	u8 __iomem *hw_addr;
-	struct atl1_adapter *back;
-	enum atl1_dma_order dma_ord;
-	enum atl1_dma_rcb rcb_value;
-	enum atl1_dma_req_block dmar_block;
-	enum atl1_dma_req_block dmaw_block;
-	u8 preamble_len;
-	u8 max_retry;		/* Retransmission maximum, after which the
-				 * packet will be discarded */
-	u8 jam_ipg;		/* IPG to start JAM for collision based flow
-				 * control in half-duplex mode. In units of
-				 * 8-bit time */
-	u8 ipgt;		/* Desired back to back inter-packet gap.
-				 * The default is 96-bit time */
-	u8 min_ifg;		/* Minimum number of IFG to enforce in between
-				 * receive frames. Frame gap below such IFP
-				 * is dropped */
-	u8 ipgr1;		/* 64bit Carrier-Sense window */
-	u8 ipgr2;		/* 96-bit IPG window */
-	u8 tpd_burst;		/* Number of TPD to prefetch in cache-aligned
-				 * burst. Each TPD is 16 bytes long */
-	u8 rfd_burst;		/* Number of RFD to prefetch in cache-aligned
-				 * burst. Each RFD is 12 bytes long */
-	u8 rfd_fetch_gap;
-	u8 rrd_burst;		/* Threshold number of RRDs that can be retired
-				 * in a burst. Each RRD is 16 bytes long */
-	u8 tpd_fetch_th;
-	u8 tpd_fetch_gap;
-	u16 tx_jumbo_task_th;
-	u16 txf_burst;		/* Number of data bytes to read in a cache-
-				 * aligned burst. Each SRAM entry is 8 bytes */
-	u16 rx_jumbo_th;	/* Jumbo packet size for non-VLAN packet. VLAN
-				 * packets should add 4 bytes */
-	u16 rx_jumbo_lkah;
-	u16 rrd_ret_timer;	/* RRD retirement timer. Decrement by 1 after
-				 * every 512ns passes. */
-	u16 lcol;		/* Collision Window */
-
-	u16 cmb_tpd;
-	u16 cmb_rrd;
-	u16 cmb_rx_timer;
-	u16 cmb_tx_timer;
-	u32 smb_timer;
-	u16 media_type;
-	u16 autoneg_advertised;
-
-	u16 mii_autoneg_adv_reg;
-	u16 mii_1000t_ctrl_reg;
-
-	u32 max_frame_size;
-	u32 min_frame_size;
-
-	u16 dev_rev;
-
-	/* spi flash */
-	u8 flash_vendor;
-
-	u8 mac_addr[ETH_ALEN];
-	u8 perm_mac_addr[ETH_ALEN];
-
-	bool phy_configured;
-};
-
-struct atl1_adapter {
-	struct net_device *netdev;
-	struct pci_dev *pdev;
-	struct net_device_stats net_stats;
-	struct atl1_sft_stats soft_stats;
-	struct vlan_group *vlgrp;
-	u32 rx_buffer_len;
-	u32 wol;
-	u16 link_speed;
-	u16 link_duplex;
-	spinlock_t lock;
-	struct work_struct tx_timeout_task;
-	struct work_struct link_chg_task;
-	struct work_struct pcie_dma_to_rst_task;
-	struct timer_list watchdog_timer;
-	struct timer_list phy_config_timer;
-	bool phy_timer_pending;
-
-	/* all descriptor rings' memory */
-	struct atl1_ring_header ring_header;
-
-	/* TX */
-	struct atl1_tpd_ring tpd_ring;
-	spinlock_t mb_lock;
-
-	/* RX */
-	struct atl1_rfd_ring rfd_ring;
-	struct atl1_rrd_ring rrd_ring;
-	u64 hw_csum_err;
-	u64 hw_csum_good;
-
-	u16 imt;	/* interrupt moderator timer (2us resolution */
-	u16 ict;	/* interrupt clear timer (2us resolution */
-	struct mii_if_info mii;		/* MII interface info */
-
-	/* structs defined in atl1_hw.h */
-	u32 bd_number;			/* board number */
-	bool pci_using_64;
-	struct atl1_hw hw;
-	struct atl1_smb smb;
-	struct atl1_cmb cmb;
-};
-
-#endif	/* _ATL1_H_ */

+ 0 - 505
drivers/net/atl1/atl1_ethtool.c

@@ -1,505 +0,0 @@
-/*
- * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
- * Copyright(c) 2006 Chris Snook <csnook@redhat.com>
- * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
- *
- * Derived from Intel e1000 driver
- * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- */
-
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/ethtool.h>
-#include <linux/netdevice.h>
-#include <linux/mii.h>
-#include <asm/uaccess.h>
-
-#include "atl1.h"
-
-struct atl1_stats {
-	char stat_string[ETH_GSTRING_LEN];
-	int sizeof_stat;
-	int stat_offset;
-};
-
-#define ATL1_STAT(m) sizeof(((struct atl1_adapter *)0)->m), \
-	offsetof(struct atl1_adapter, m)
-
-static struct atl1_stats atl1_gstrings_stats[] = {
-	{"rx_packets", ATL1_STAT(soft_stats.rx_packets)},
-	{"tx_packets", ATL1_STAT(soft_stats.tx_packets)},
-	{"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)},
-	{"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)},
-	{"rx_errors", ATL1_STAT(soft_stats.rx_errors)},
-	{"tx_errors", ATL1_STAT(soft_stats.tx_errors)},
-	{"rx_dropped", ATL1_STAT(net_stats.rx_dropped)},
-	{"tx_dropped", ATL1_STAT(net_stats.tx_dropped)},
-	{"multicast", ATL1_STAT(soft_stats.multicast)},
-	{"collisions", ATL1_STAT(soft_stats.collisions)},
-	{"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)},
-	{"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
-	{"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)},
-	{"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)},
-	{"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)},
-	{"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
-	{"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)},
-	{"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)},
-	{"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)},
-	{"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)},
-	{"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)},
-	{"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)},
-	{"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)},
-	{"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)},
-	{"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)},
-	{"tx_underun", ATL1_STAT(soft_stats.tx_underun)},
-	{"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)},
-	{"tx_pause", ATL1_STAT(soft_stats.tx_pause)},
-	{"rx_pause", ATL1_STAT(soft_stats.rx_pause)},
-	{"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)},
-	{"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)}
-};
-
-static void atl1_get_ethtool_stats(struct net_device *netdev,
-				struct ethtool_stats *stats, u64 *data)
-{
-	struct atl1_adapter *adapter = netdev_priv(netdev);
-	int i;
-	char *p;
-
-	for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
-		p = (char *)adapter+atl1_gstrings_stats[i].stat_offset;
-		data[i] = (atl1_gstrings_stats[i].sizeof_stat ==
-			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
-	}
-
-}
-
-static int atl1_get_sset_count(struct net_device *netdev, int sset)
-{
-	switch (sset) {
-	case ETH_SS_STATS:
-		return ARRAY_SIZE(atl1_gstrings_stats);
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
-static int atl1_get_settings(struct net_device *netdev,
-				struct ethtool_cmd *ecmd)
-{
-	struct atl1_adapter *adapter = netdev_priv(netdev);
-	struct atl1_hw *hw = &adapter->hw;
-
-	ecmd->supported = (SUPPORTED_10baseT_Half |
-			   SUPPORTED_10baseT_Full |
-			   SUPPORTED_100baseT_Half |
-			   SUPPORTED_100baseT_Full |
-			   SUPPORTED_1000baseT_Full |
-			   SUPPORTED_Autoneg | SUPPORTED_TP);
-	ecmd->advertising = ADVERTISED_TP;
-	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
-	    hw->media_type == MEDIA_TYPE_1000M_FULL) {
-		ecmd->advertising |= ADVERTISED_Autoneg;
-		if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR) {
-			ecmd->advertising |= ADVERTISED_Autoneg;
-			ecmd->advertising |=
-			    (ADVERTISED_10baseT_Half |
-			     ADVERTISED_10baseT_Full |
-			     ADVERTISED_100baseT_Half |
-			     ADVERTISED_100baseT_Full |
-			     ADVERTISED_1000baseT_Full);
-		}
-		else
-			ecmd->advertising |= (ADVERTISED_1000baseT_Full);
-	}
-	ecmd->port = PORT_TP;
-	ecmd->phy_address = 0;
-	ecmd->transceiver = XCVR_INTERNAL;
-
-	if (netif_carrier_ok(adapter->netdev)) {
-		u16 link_speed, link_duplex;
-		atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex);
-		ecmd->speed = link_speed;
-		if (link_duplex == FULL_DUPLEX)
-			ecmd->duplex = DUPLEX_FULL;
-		else
-			ecmd->duplex = DUPLEX_HALF;
-	} else {
-		ecmd->speed = -1;
-		ecmd->duplex = -1;
-	}
-	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
-	    hw->media_type == MEDIA_TYPE_1000M_FULL)
-		ecmd->autoneg = AUTONEG_ENABLE;
-	else
-		ecmd->autoneg = AUTONEG_DISABLE;
-
-	return 0;
-}
-
-static int atl1_set_settings(struct net_device *netdev,
-				struct ethtool_cmd *ecmd)
-{
-	struct atl1_adapter *adapter = netdev_priv(netdev);
-	struct atl1_hw *hw = &adapter->hw;
-	u16 phy_data;
-	int ret_val = 0;
-	u16 old_media_type = hw->media_type;
-
-	if (netif_running(adapter->netdev)) {
-		dev_dbg(&adapter->pdev->dev, "ethtool shutting down adapter\n");
-		atl1_down(adapter);
-	}
-
-	if (ecmd->autoneg == AUTONEG_ENABLE)
-		hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
-	else {
-		if (ecmd->speed == SPEED_1000) {
-			if (ecmd->duplex != DUPLEX_FULL) {
-				dev_warn(&adapter->pdev->dev,
-					"can't force to 1000M half duplex\n");
-				ret_val = -EINVAL;
-				goto exit_sset;
-			}
-			hw->media_type = MEDIA_TYPE_1000M_FULL;
-		} else if (ecmd->speed == SPEED_100) {
-			if (ecmd->duplex == DUPLEX_FULL) {
-				hw->media_type = MEDIA_TYPE_100M_FULL;
-			} else
-				hw->media_type = MEDIA_TYPE_100M_HALF;
-		} else {
-			if (ecmd->duplex == DUPLEX_FULL)
-				hw->media_type = MEDIA_TYPE_10M_FULL;
-			else
-				hw->media_type = MEDIA_TYPE_10M_HALF;
-		}
-	}
-	switch (hw->media_type) {
-	case MEDIA_TYPE_AUTO_SENSOR:
-		ecmd->advertising =
-		    ADVERTISED_10baseT_Half |
-		    ADVERTISED_10baseT_Full |
-		    ADVERTISED_100baseT_Half |
-		    ADVERTISED_100baseT_Full |
-		    ADVERTISED_1000baseT_Full |
-		    ADVERTISED_Autoneg | ADVERTISED_TP;
-		break;
-	case MEDIA_TYPE_1000M_FULL:
-		ecmd->advertising =
-		    ADVERTISED_1000baseT_Full |
-		    ADVERTISED_Autoneg | ADVERTISED_TP;
-		break;
-	default:
-		ecmd->advertising = 0;
-		break;
-	}
-	if (atl1_phy_setup_autoneg_adv(hw)) {
-		ret_val = -EINVAL;
-		dev_warn(&adapter->pdev->dev,
-			"invalid ethtool speed/duplex setting\n");
-		goto exit_sset;
-	}
-	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
-	    hw->media_type == MEDIA_TYPE_1000M_FULL)
-		phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
-	else {
-		switch (hw->media_type) {
-		case MEDIA_TYPE_100M_FULL:
-			phy_data =
-			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
-			    MII_CR_RESET;
-			break;
-		case MEDIA_TYPE_100M_HALF:
-			phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
-			break;
-		case MEDIA_TYPE_10M_FULL:
-			phy_data =
-			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
-			break;
-		default:	/* MEDIA_TYPE_10M_HALF: */
-			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
-			break;
-		}
-	}
-	atl1_write_phy_reg(hw, MII_BMCR, phy_data);
-exit_sset:
-	if (ret_val)
-		hw->media_type = old_media_type;
-
-	if (netif_running(adapter->netdev)) {
-		dev_dbg(&adapter->pdev->dev, "ethtool starting adapter\n");
-		atl1_up(adapter);
-	} else if (!ret_val) {
-		dev_dbg(&adapter->pdev->dev, "ethtool resetting adapter\n");
-		atl1_reset(adapter);
-	}
-	return ret_val;
-}
-
-static void atl1_get_drvinfo(struct net_device *netdev,
-				struct ethtool_drvinfo *drvinfo)
-{
-	struct atl1_adapter *adapter = netdev_priv(netdev);
-
-	strncpy(drvinfo->driver, atl1_driver_name, sizeof(drvinfo->driver));
-	strncpy(drvinfo->version, atl1_driver_version,
-		sizeof(drvinfo->version));
-	strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
-	strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
-		sizeof(drvinfo->bus_info));
-	drvinfo->eedump_len = ATL1_EEDUMP_LEN;
-}
-
-static void atl1_get_wol(struct net_device *netdev,
-			    struct ethtool_wolinfo *wol)
-{
-	struct atl1_adapter *adapter = netdev_priv(netdev);
-
-	wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
-	wol->wolopts = 0;
-	if (adapter->wol & ATL1_WUFC_EX)
-		wol->wolopts |= WAKE_UCAST;
-	if (adapter->wol & ATL1_WUFC_MC)
-		wol->wolopts |= WAKE_MCAST;
-	if (adapter->wol & ATL1_WUFC_BC)
-		wol->wolopts |= WAKE_BCAST;
-	if (adapter->wol & ATL1_WUFC_MAG)
-		wol->wolopts |= WAKE_MAGIC;
-	return;
-}
-
-static int atl1_set_wol(struct net_device *netdev,
-			struct ethtool_wolinfo *wol)
-{
-	struct atl1_adapter *adapter = netdev_priv(netdev);
-
-	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
-		return -EOPNOTSUPP;
-	adapter->wol = 0;
-	if (wol->wolopts & WAKE_UCAST)
-		adapter->wol |= ATL1_WUFC_EX;
-	if (wol->wolopts & WAKE_MCAST)
-		adapter->wol |= ATL1_WUFC_MC;
-	if (wol->wolopts & WAKE_BCAST)
-		adapter->wol |= ATL1_WUFC_BC;
-	if (wol->wolopts & WAKE_MAGIC)
-		adapter->wol |= ATL1_WUFC_MAG;
-	return 0;
-}
-
-static void atl1_get_ringparam(struct net_device *netdev,
-			    struct ethtool_ringparam *ring)
-{
-	struct atl1_adapter *adapter = netdev_priv(netdev);
-	struct atl1_tpd_ring *txdr = &adapter->tpd_ring;
-	struct atl1_rfd_ring *rxdr = &adapter->rfd_ring;
-
-	ring->rx_max_pending = ATL1_MAX_RFD;
-	ring->tx_max_pending = ATL1_MAX_TPD;
-	ring->rx_mini_max_pending = 0;
-	ring->rx_jumbo_max_pending = 0;
-	ring->rx_pending = rxdr->count;
-	ring->tx_pending = txdr->count;
-	ring->rx_mini_pending = 0;
-	ring->rx_jumbo_pending = 0;
-}
-
-static int atl1_set_ringparam(struct net_device *netdev,
-				struct ethtool_ringparam *ring)
-{
-	struct atl1_adapter *adapter = netdev_priv(netdev);
-	struct atl1_tpd_ring *tpdr = &adapter->tpd_ring;
-	struct atl1_rrd_ring *rrdr = &adapter->rrd_ring;
-	struct atl1_rfd_ring *rfdr = &adapter->rfd_ring;
-
-	struct atl1_tpd_ring tpd_old, tpd_new;
-	struct atl1_rfd_ring rfd_old, rfd_new;
-	struct atl1_rrd_ring rrd_old, rrd_new;
-	struct atl1_ring_header rhdr_old, rhdr_new;
-	int err;
-
-	tpd_old = adapter->tpd_ring;
-	rfd_old = adapter->rfd_ring;
-	rrd_old = adapter->rrd_ring;
-	rhdr_old = adapter->ring_header;
-
-	if (netif_running(adapter->netdev))
-		atl1_down(adapter);
-
-	rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD);
-	rfdr->count = rfdr->count > ATL1_MAX_RFD ? ATL1_MAX_RFD :
-			rfdr->count;
-	rfdr->count = (rfdr->count + 3) & ~3;
-	rrdr->count = rfdr->count;
-
-	tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD);
-	tpdr->count = tpdr->count > ATL1_MAX_TPD ? ATL1_MAX_TPD :
-			tpdr->count;
-	tpdr->count = (tpdr->count + 3) & ~3;
-
-	if (netif_running(adapter->netdev)) {
-		/* try to get new resources before deleting old */
-		err = atl1_setup_ring_resources(adapter);
-		if (err)
-			goto err_setup_ring;
-
-		/*
-		 * save the new, restore the old in order to free it,
-		 * then restore the new back again
-		 */
-
-		rfd_new = adapter->rfd_ring;
-		rrd_new = adapter->rrd_ring;
-		tpd_new = adapter->tpd_ring;
-		rhdr_new = adapter->ring_header;
-		adapter->rfd_ring = rfd_old;
-		adapter->rrd_ring = rrd_old;
-		adapter->tpd_ring = tpd_old;
-		adapter->ring_header = rhdr_old;
-		atl1_free_ring_resources(adapter);
-		adapter->rfd_ring = rfd_new;
-		adapter->rrd_ring = rrd_new;
-		adapter->tpd_ring = tpd_new;
-		adapter->ring_header = rhdr_new;
-
-		err = atl1_up(adapter);
-		if (err)
-			return err;
-	}
-	return 0;
-
-err_setup_ring:
-	adapter->rfd_ring = rfd_old;
-	adapter->rrd_ring = rrd_old;
-	adapter->tpd_ring = tpd_old;
-	adapter->ring_header = rhdr_old;
-	atl1_up(adapter);
-	return err;
-}
-
-static void atl1_get_pauseparam(struct net_device *netdev,
-			     struct ethtool_pauseparam *epause)
-{
-	struct atl1_adapter *adapter = netdev_priv(netdev);
-	struct atl1_hw *hw = &adapter->hw;
-
-	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
-	    hw->media_type == MEDIA_TYPE_1000M_FULL) {
-		epause->autoneg = AUTONEG_ENABLE;
-	} else {
-		epause->autoneg = AUTONEG_DISABLE;
-	}
-	epause->rx_pause = 1;
-	epause->tx_pause = 1;
-}
-
-static int atl1_set_pauseparam(struct net_device *netdev,
-			     struct ethtool_pauseparam *epause)
-{
-	struct atl1_adapter *adapter = netdev_priv(netdev);
-	struct atl1_hw *hw = &adapter->hw;
-
-	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
-	    hw->media_type == MEDIA_TYPE_1000M_FULL) {
-		epause->autoneg = AUTONEG_ENABLE;
-	} else {
-		epause->autoneg = AUTONEG_DISABLE;
-	}
-
-	epause->rx_pause = 1;
-	epause->tx_pause = 1;
-
-	return 0;
-}
-
-static u32 atl1_get_rx_csum(struct net_device *netdev)
-{
-	return 1;
-}
-
-static void atl1_get_strings(struct net_device *netdev, u32 stringset,
-				u8 *data)
-{
-	u8 *p = data;
-	int i;
-
-	switch (stringset) {
-	case ETH_SS_STATS:
-		for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
-			memcpy(p, atl1_gstrings_stats[i].stat_string,
-				ETH_GSTRING_LEN);
-			p += ETH_GSTRING_LEN;
-		}
-		break;
-	}
-}
-
-static int atl1_nway_reset(struct net_device *netdev)
-{
-	struct atl1_adapter *adapter = netdev_priv(netdev);
-	struct atl1_hw *hw = &adapter->hw;
-
-	if (netif_running(netdev)) {
-		u16 phy_data;
-		atl1_down(adapter);
-
-		if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
-			hw->media_type == MEDIA_TYPE_1000M_FULL) {
-			phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
-		} else {
-			switch (hw->media_type) {
-			case MEDIA_TYPE_100M_FULL:
-				phy_data = MII_CR_FULL_DUPLEX |
-					MII_CR_SPEED_100 | MII_CR_RESET;
-				break;
-			case MEDIA_TYPE_100M_HALF:
-				phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
-				break;
-			case MEDIA_TYPE_10M_FULL:
-				phy_data = MII_CR_FULL_DUPLEX |
-					MII_CR_SPEED_10 | MII_CR_RESET;
-				break;
-			default:  /* MEDIA_TYPE_10M_HALF */
-				phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
-			}
-		}
-		atl1_write_phy_reg(hw, MII_BMCR, phy_data);
-		atl1_up(adapter);
-	}
-	return 0;
-}
-
-const struct ethtool_ops atl1_ethtool_ops = {
-	.get_settings		= atl1_get_settings,
-	.set_settings		= atl1_set_settings,
-	.get_drvinfo		= atl1_get_drvinfo,
-	.get_wol		= atl1_get_wol,
-	.set_wol		= atl1_set_wol,
-	.get_ringparam		= atl1_get_ringparam,
-	.set_ringparam		= atl1_set_ringparam,
-	.get_pauseparam		= atl1_get_pauseparam,
-	.set_pauseparam 	= atl1_set_pauseparam,
-	.get_rx_csum		= atl1_get_rx_csum,
-	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
-	.get_link		= ethtool_op_get_link,
-	.set_sg			= ethtool_op_set_sg,
-	.get_strings		= atl1_get_strings,
-	.nway_reset		= atl1_nway_reset,
-	.get_ethtool_stats	= atl1_get_ethtool_stats,
-	.get_sset_count		= atl1_get_sset_count,
-	.set_tso		= ethtool_op_set_tso,
-};
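
The ethtool code removed above builds its statistics table from sizeof()/offsetof() pairs (the ATL1_STAT macro) and then lets atl1_get_ethtool_stats() walk that table with byte-offset pointer arithmetic, reading each counter as a u64 or u32 depending on its recorded size. Below is a minimal standalone sketch of that table pattern; the demo_adapter/demo_stat names are invented for illustration and are not the driver's real types.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_adapter {
	uint64_t rx_packets;
	uint64_t tx_packets;
	uint32_t rx_crc_errors;
};

struct demo_stat {
	const char *name;
	int size;	/* sizeof() of the field */
	int offset;	/* offsetof() of the field within demo_adapter */
};

/* Same trick as ATL1_STAT(): record the field's size and offset so the
 * counters can be read generically later. */
#define DEMO_STAT(m) { #m, sizeof(((struct demo_adapter *)0)->m), \
		       offsetof(struct demo_adapter, m) }

static const struct demo_stat demo_stats[] = {
	DEMO_STAT(rx_packets),
	DEMO_STAT(tx_packets),
	DEMO_STAT(rx_crc_errors),
};

/* Walk the table the way atl1_get_ethtool_stats() does: form a byte pointer
 * into the adapter and dereference it as u64 or u32 based on the size. */
static void demo_get_stats(const struct demo_adapter *adapter, uint64_t *data)
{
	size_t i;

	for (i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++) {
		const char *p = (const char *)adapter + demo_stats[i].offset;

		data[i] = (demo_stats[i].size == sizeof(uint64_t)) ?
			*(const uint64_t *)p : *(const uint32_t *)p;
	}
}

int main(void)
{
	struct demo_adapter a = { 10, 20, 3 };
	uint64_t data[3];
	size_t i;

	demo_get_stats(&a, data);
	for (i = 0; i < 3; i++)
		printf("%s = %llu\n", demo_stats[i].name,
		       (unsigned long long)data[i]);
	return 0;
}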

+ 0 - 720
drivers/net/atl1/atl1_hw.c

@@ -1,720 +0,0 @@
-/*
- * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
- * Copyright(c) 2006 Chris Snook <csnook@redhat.com>
- * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
- *
- * Derived from Intel e1000 driver
- * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- */
-
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/if_vlan.h>
-#include <linux/etherdevice.h>
-#include <linux/crc32.h>
-#include <asm/byteorder.h>
-
-#include "atl1.h"
-
-/*
- * Reset the transmit and receive units; mask and clear all interrupts.
- * hw - Struct containing variables accessed by shared code
- * return : ATL1_SUCCESS  or  idle status (if error)
- */
-s32 atl1_reset_hw(struct atl1_hw *hw)
-{
-	struct pci_dev *pdev = hw->back->pdev;
-	u32 icr;
-	int i;
-
-	/*
-	 * Clear the interrupt mask to stop the board from generating
-	 * interrupts and clear any pending interrupt events.
-	 */
-	/*
-	 * iowrite32(0, hw->hw_addr + REG_IMR);
-	 * iowrite32(0xffffffff, hw->hw_addr + REG_ISR);
-	 */
-
-	/*
-	 * Issue Soft Reset to the MAC.  This will reset the chip's
-	 * transmit, receive, and DMA units.  It will not affect
-	 * the current PCI configuration.  The global reset bit is self-
-	 * clearing, and should clear within a microsecond.
-	 */
-	iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL);
-	ioread32(hw->hw_addr + REG_MASTER_CTRL);
-
-	iowrite16(1, hw->hw_addr + REG_GPHY_ENABLE);
-	ioread16(hw->hw_addr + REG_GPHY_ENABLE);
-
-	msleep(1);		/* delay about 1ms */
-
-	/* Wait at least 10ms for all modules to be idle */
-	for (i = 0; i < 10; i++) {
-		icr = ioread32(hw->hw_addr + REG_IDLE_STATUS);
-		if (!icr)
-			break;
-		msleep(1);	/* delay 1 ms */
-		cpu_relax();	/* FIXME: is this still the right way to do this? */
-	}
-
-	if (icr) {
-		dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr);
-		return icr;
-	}
-
-	return ATL1_SUCCESS;
-}
-
-/* EEPROM helper functions
- *
- * atl1_check_eeprom_exist
- * returns 0 if the EEPROM exists
- */
-static int atl1_check_eeprom_exist(struct atl1_hw *hw)
-{
-	u32 value;
-	value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
-	if (value & SPI_FLASH_CTRL_EN_VPD) {
-		value &= ~SPI_FLASH_CTRL_EN_VPD;
-		iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
-	}
-
-	value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST);
-	return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
-}
-
-static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
-{
-	int i;
-	u32 control;
-
-	if (offset & 3)
-		return false;	/* address is not aligned */
-
-	iowrite32(0, hw->hw_addr + REG_VPD_DATA);
-	control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
-	iowrite32(control, hw->hw_addr + REG_VPD_CAP);
-	ioread32(hw->hw_addr + REG_VPD_CAP);
-
-	for (i = 0; i < 10; i++) {
-		msleep(2);
-		control = ioread32(hw->hw_addr + REG_VPD_CAP);
-		if (control & VPD_CAP_VPD_FLAG)
-			break;
-	}
-	if (control & VPD_CAP_VPD_FLAG) {
-		*p_value = ioread32(hw->hw_addr + REG_VPD_DATA);
-		return true;
-	}
-	return false;		/* timeout */
-}
-
-/*
- * Reads the value from a PHY register
- * hw - Struct containing variables accessed by shared code
- * reg_addr - address of the PHY register to read
- */
-s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
-{
-	u32 val;
-	int i;
-
-	val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
-		MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 <<
-		MDIO_CLK_SEL_SHIFT;
-	iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
-	ioread32(hw->hw_addr + REG_MDIO_CTRL);
-
-	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
-		udelay(2);
-		val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
-		if (!(val & (MDIO_START | MDIO_BUSY)))
-			break;
-	}
-	if (!(val & (MDIO_START | MDIO_BUSY))) {
-		*phy_data = (u16) val;
-		return ATL1_SUCCESS;
-	}
-	return ATL1_ERR_PHY;
-}
-
-#define CUSTOM_SPI_CS_SETUP	2
-#define CUSTOM_SPI_CLK_HI	2
-#define CUSTOM_SPI_CLK_LO	2
-#define CUSTOM_SPI_CS_HOLD	2
-#define CUSTOM_SPI_CS_HI	3
-
-static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf)
-{
-	int i;
-	u32 value;
-
-	iowrite32(0, hw->hw_addr + REG_SPI_DATA);
-	iowrite32(addr, hw->hw_addr + REG_SPI_ADDR);
-
-	value = SPI_FLASH_CTRL_WAIT_READY |
-	    (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
-	    SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI &
-					     SPI_FLASH_CTRL_CLK_HI_MASK) <<
-	    SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO &
-					   SPI_FLASH_CTRL_CLK_LO_MASK) <<
-	    SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD &
-					   SPI_FLASH_CTRL_CS_HOLD_MASK) <<
-	    SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI &
-					    SPI_FLASH_CTRL_CS_HI_MASK) <<
-	    SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) <<
-	    SPI_FLASH_CTRL_INS_SHIFT;
-
-	iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
-
-	value |= SPI_FLASH_CTRL_START;
-	iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
-	ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
-
-	for (i = 0; i < 10; i++) {
-		msleep(1);	/* 1ms */
-		value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
-		if (!(value & SPI_FLASH_CTRL_START))
-			break;
-	}
-
-	if (value & SPI_FLASH_CTRL_START)
-		return false;
-
-	*buf = ioread32(hw->hw_addr + REG_SPI_DATA);
-
-	return true;
-}
-
-/*
- * get_permanent_address
- * returns 0 if a valid MAC address was obtained
- */
-static int atl1_get_permanent_address(struct atl1_hw *hw)
-{
-	u32 addr[2];
-	u32 i, control;
-	u16 reg;
-	u8 eth_addr[ETH_ALEN];
-	bool key_valid;
-
-	if (is_valid_ether_addr(hw->perm_mac_addr))
-		return 0;
-
-	/* init */
-	addr[0] = addr[1] = 0;
-
-	if (!atl1_check_eeprom_exist(hw)) {	/* eeprom exist */
-		reg = 0;
-		key_valid = false;
-		/* Read out all EEPROM content */
-		i = 0;
-		while (1) {
-			if (atl1_read_eeprom(hw, i + 0x100, &control)) {
-				if (key_valid) {
-					if (reg == REG_MAC_STA_ADDR)
-						addr[0] = control;
-					else if (reg == (REG_MAC_STA_ADDR + 4))
-						addr[1] = control;
-					key_valid = false;
-				} else if ((control & 0xff) == 0x5A) {
-					key_valid = true;
-					reg = (u16) (control >> 16);
-				} else
-					break;	/* assume end of data when an invalid keyword is encountered */
-			} else
-				break;	/* read error */
-			i += 4;
-		}
-
-		*(u32 *) &eth_addr[2] = swab32(addr[0]);
-		*(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
-		if (is_valid_ether_addr(eth_addr)) {
-			memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
-			return 0;
-		}
-		return 1;
-	}
-
-	/* see if SPI flash exists */
-	addr[0] = addr[1] = 0;
-	reg = 0;
-	key_valid = false;
-	i = 0;
-	while (1) {
-		if (atl1_spi_read(hw, i + 0x1f000, &control)) {
-			if (key_valid) {
-				if (reg == REG_MAC_STA_ADDR)
-					addr[0] = control;
-				else if (reg == (REG_MAC_STA_ADDR + 4))
-					addr[1] = control;
-				key_valid = false;
-			} else if ((control & 0xff) == 0x5A) {
-				key_valid = true;
-				reg = (u16) (control >> 16);
-			} else
-				break;	/* data end */
-		} else
-			break;	/* read error */
-		i += 4;
-	}
-
-	*(u32 *) &eth_addr[2] = swab32(addr[0]);
-	*(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
-	if (is_valid_ether_addr(eth_addr)) {
-		memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
-		return 0;
-	}
-
-	/*
-	 * On some motherboards, the MAC address is written by the
-	 * BIOS directly to the MAC register during POST, and is
-	 * not stored in eeprom.  If all else thus far has failed
-	 * to fetch the permanent MAC address, try reading it directly.
-	 */
-	addr[0] = ioread32(hw->hw_addr + REG_MAC_STA_ADDR);
-	addr[1] = ioread16(hw->hw_addr + (REG_MAC_STA_ADDR + 4));
-	*(u32 *) &eth_addr[2] = swab32(addr[0]);
-	*(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
-	if (is_valid_ether_addr(eth_addr)) {
-		memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
-		return 0;
-	}
-
-	return 1;
-}
-
-/*
- * Reads the adapter's MAC address from the EEPROM
- * hw - Struct containing variables accessed by shared code
- */
-s32 atl1_read_mac_addr(struct atl1_hw *hw)
-{
-	u16 i;
-
-	if (atl1_get_permanent_address(hw))
-		random_ether_addr(hw->perm_mac_addr);
-
-	for (i = 0; i < ETH_ALEN; i++)
-		hw->mac_addr[i] = hw->perm_mac_addr[i];
-	return ATL1_SUCCESS;
-}
-
-/*
- * Hashes an address to determine its location in the multicast table
- * hw - Struct containing variables accessed by shared code
- * mc_addr - the multicast address to hash
- *
- * atl1_hash_mc_addr computes the hash value for a multicast address:
- *      1. calculate the 32-bit CRC of the multicast address
- *      2. reverse the CRC bit order (MSB to LSB)
- */
-u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
-{
-	u32 crc32, value = 0;
-	int i;
-
-	crc32 = ether_crc_le(6, mc_addr);
-	for (i = 0; i < 32; i++)
-		value |= (((crc32 >> i) & 1) << (31 - i));
-
-	return value;
-}
-
-/*
- * Sets the bit in the multicast table corresponding to the hash value.
- * hw - Struct containing variables accessed by shared code
- * hash_value - Multicast address hash value
- */
-void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
-{
-	u32 hash_bit, hash_reg;
-	u32 mta;
-
-	/*
-	 * The hash table is a register array of two 32-bit registers.
-	 * It is treated like an array of 64 bits.  We want to set
-	 * bit BitArray[hash_value]. So we figure out what register
-	 * the bit is in, read it, OR in the new bit, then write
-	 * back the new value.  The register is selected by the top
-	 * bit of the hash value and the bit position within that
-	 * register by the next five bits (bits 30:26).
-	 */
-	hash_reg = (hash_value >> 31) & 0x1;
-	hash_bit = (hash_value >> 26) & 0x1F;
-	mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
-	mta |= (1 << hash_bit);
-	iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
-}
-
-/*
- * Writes a value to a PHY register
- * hw - Struct containing variables accessed by shared code
- * reg_addr - address of the PHY register to write
- * data - data to write to the PHY
- */
-s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data)
-{
-	int i;
-	u32 val;
-
-	val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
-	    (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
-	    MDIO_SUP_PREAMBLE |
-	    MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
-	iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
-	ioread32(hw->hw_addr + REG_MDIO_CTRL);
-
-	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
-		udelay(2);
-		val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
-		if (!(val & (MDIO_START | MDIO_BUSY)))
-			break;
-	}
-
-	if (!(val & (MDIO_START | MDIO_BUSY)))
-		return ATL1_SUCCESS;
-
-	return ATL1_ERR_PHY;
-}
-
-/*
- * Take the L001 PHY out of its power-saving state (hardware bug workaround)
- * hw - Struct containing variables accessed by shared code
- * At power-on, the L001 PHY is always in power-saving state
- * (gigabit link not possible)
- */
-static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw)
-{
-	s32 ret;
-	ret = atl1_write_phy_reg(hw, 29, 0x0029);
-	if (ret)
-		return ret;
-	return atl1_write_phy_reg(hw, 30, 0);
-}
-
-/*
- *TODO: do something or get rid of this
- */
-s32 atl1_phy_enter_power_saving(struct atl1_hw *hw)
-{
-/*    s32 ret_val;
- *    u16 phy_data;
- */
-
-/*
-    ret_val = atl1_write_phy_reg(hw, ...);
-    ret_val = atl1_write_phy_reg(hw, ...);
-    ....
-*/
-	return ATL1_SUCCESS;
-}
-
-/*
- * Resets the PHY and makes all configuration take effect
- * hw - Struct containing variables accessed by shared code
- *
- * Sets bits 15 and 12 of the MII Control register (for the F001 bug)
- */
-static s32 atl1_phy_reset(struct atl1_hw *hw)
-{
-	struct pci_dev *pdev = hw->back->pdev;
-	s32 ret_val;
-	u16 phy_data;
-
-	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
-	    hw->media_type == MEDIA_TYPE_1000M_FULL)
-		phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
-	else {
-		switch (hw->media_type) {
-		case MEDIA_TYPE_100M_FULL:
-			phy_data =
-			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
-			    MII_CR_RESET;
-			break;
-		case MEDIA_TYPE_100M_HALF:
-			phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
-			break;
-		case MEDIA_TYPE_10M_FULL:
-			phy_data =
-			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
-			break;
-		default:	/* MEDIA_TYPE_10M_HALF: */
-			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
-			break;
-		}
-	}
-
-	ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data);
-	if (ret_val) {
-		u32 val;
-		int i;
-		/* pcie serdes link may be down! */
-		dev_dbg(&pdev->dev, "pcie phy link down\n");
-
-		for (i = 0; i < 25; i++) {
-			msleep(1);
-			val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
-			if (!(val & (MDIO_START | MDIO_BUSY)))
-				break;
-		}
-
-		if ((val & (MDIO_START | MDIO_BUSY)) != 0) {
-			dev_warn(&pdev->dev, "pcie link down at least 25ms\n");
-			return ret_val;
-		}
-	}
-	return ATL1_SUCCESS;
-}
-
-/*
- * Configures PHY autoneg and flow control advertisement settings
- * hw - Struct containing variables accessed by shared code
- */
-s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw)
-{
-	s32 ret_val;
-	s16 mii_autoneg_adv_reg;
-	s16 mii_1000t_ctrl_reg;
-
-	/* Read the MII Auto-Neg Advertisement Register (Address 4). */
-	mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
-
-	/* Read the MII 1000Base-T Control Register (Address 9). */
-	mii_1000t_ctrl_reg = MII_AT001_CR_1000T_DEFAULT_CAP_MASK;
-
-	/*
-	 * First we clear all the 10/100 mb speed bits in the Auto-Neg
-	 * Advertisement Register (Address 4) and the 1000 mb speed bits in
-	 * the  1000Base-T Control Register (Address 9).
-	 */
-	mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
-	mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK;
-
-	/*
-	 * Need to parse media_type  and set up
-	 * the appropriate PHY registers.
-	 */
-	switch (hw->media_type) {
-	case MEDIA_TYPE_AUTO_SENSOR:
-		mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
-					MII_AR_10T_FD_CAPS |
-					MII_AR_100TX_HD_CAPS |
-					MII_AR_100TX_FD_CAPS);
-		mii_1000t_ctrl_reg |= MII_AT001_CR_1000T_FD_CAPS;
-		break;
-
-	case MEDIA_TYPE_1000M_FULL:
-		mii_1000t_ctrl_reg |= MII_AT001_CR_1000T_FD_CAPS;
-		break;
-
-	case MEDIA_TYPE_100M_FULL:
-		mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
-		break;
-
-	case MEDIA_TYPE_100M_HALF:
-		mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
-		break;
-
-	case MEDIA_TYPE_10M_FULL:
-		mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
-		break;
-
-	default:
-		mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
-		break;
-	}
-
-	/* flow control fixed to enable all */
-	mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
-
-	hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
-	hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
-
-	ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
-	if (ret_val)
-		return ret_val;
-
-	ret_val = atl1_write_phy_reg(hw, MII_AT001_CR, mii_1000t_ctrl_reg);
-	if (ret_val)
-		return ret_val;
-
-	return ATL1_SUCCESS;
-}
-
-/*
- * Configures link settings.
- * hw - Struct containing variables accessed by shared code
- * Assumes the hardware has previously been reset and the
- * transmitter and receiver are not enabled.
- */
-static s32 atl1_setup_link(struct atl1_hw *hw)
-{
-	struct pci_dev *pdev = hw->back->pdev;
-	s32 ret_val;
-
-	/*
-	 * Options:
-	 *  PHY will advertise value(s) parsed from
-	 *  autoneg_advertised and fc
-	 *  Regardless of the autoneg setting, we do not wait for the link result.
-	 */
-	ret_val = atl1_phy_setup_autoneg_adv(hw);
-	if (ret_val) {
-		dev_dbg(&pdev->dev, "error setting up autonegotiation\n");
-		return ret_val;
-	}
-	/* SW reset; enable autoneg if needed */
-	ret_val = atl1_phy_reset(hw);
-	if (ret_val) {
-		dev_dbg(&pdev->dev, "error resetting phy\n");
-		return ret_val;
-	}
-	hw->phy_configured = true;
-	return ret_val;
-}
-
-static struct atl1_spi_flash_dev flash_table[] = {
-/*	MFR_NAME  WRSR  READ  PRGM  WREN  WRDI  RDSR  RDID  SECTOR_ERASE CHIP_ERASE */
-	{"Atmel", 0x00, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52,        0x62},
-	{"SST",   0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0x90, 0x20,        0x60},
-	{"ST",    0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0xAB, 0xD8,        0xC7},
-};
-
-static void atl1_init_flash_opcode(struct atl1_hw *hw)
-{
-	if (hw->flash_vendor >= ARRAY_SIZE(flash_table))
-		hw->flash_vendor = 0;	/* ATMEL */
-
-	/* Init OP table */
-	iowrite8(flash_table[hw->flash_vendor].cmd_program,
-		hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM);
-	iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase,
-		hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE);
-	iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase,
-		hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE);
-	iowrite8(flash_table[hw->flash_vendor].cmd_rdid,
-		hw->hw_addr + REG_SPI_FLASH_OP_RDID);
-	iowrite8(flash_table[hw->flash_vendor].cmd_wren,
-		hw->hw_addr + REG_SPI_FLASH_OP_WREN);
-	iowrite8(flash_table[hw->flash_vendor].cmd_rdsr,
-		hw->hw_addr + REG_SPI_FLASH_OP_RDSR);
-	iowrite8(flash_table[hw->flash_vendor].cmd_wrsr,
-		hw->hw_addr + REG_SPI_FLASH_OP_WRSR);
-	iowrite8(flash_table[hw->flash_vendor].cmd_read,
-		hw->hw_addr + REG_SPI_FLASH_OP_READ);
-}
-
-/*
- * Performs basic configuration of the adapter.
- * hw - Struct containing variables accessed by shared code
- * Assumes that the controller has previously been reset and is in a
- * post-reset uninitialized state. Initializes the multicast table
- * and calls routines to set up the link.
- * Leaves the transmit and receive units disabled and uninitialized.
- */
-s32 atl1_init_hw(struct atl1_hw *hw)
-{
-	u32 ret_val = 0;
-
-	/* Zero out the Multicast HASH table */
-	iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
-	/* clear the old settings from the multicast hash table */
-	iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
-
-	atl1_init_flash_opcode(hw);
-
-	if (!hw->phy_configured) {
-		/* enable GPHY LinkChange Interrupt */
-		ret_val = atl1_write_phy_reg(hw, 18, 0xC00);
-		if (ret_val)
-			return ret_val;
-		/* take the PHY out of power-saving state */
-		ret_val = atl1_phy_leave_power_saving(hw);
-		if (ret_val)
-			return ret_val;
-		/* Call a subroutine to configure the link */
-		ret_val = atl1_setup_link(hw);
-	}
-	return ret_val;
-}
-
-/*
- * Detects the current speed and duplex settings of the hardware.
- * hw - Struct containing variables accessed by shared code
- * speed - Speed of the connection
- * duplex - Duplex setting of the connection
- */
-s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex)
-{
-	struct pci_dev *pdev = hw->back->pdev;
-	s32 ret_val;
-	u16 phy_data;
-
-	/* Read the PHY Specific Status Register (17) */
-	ret_val = atl1_read_phy_reg(hw, MII_AT001_PSSR, &phy_data);
-	if (ret_val)
-		return ret_val;
-
-	if (!(phy_data & MII_AT001_PSSR_SPD_DPLX_RESOLVED))
-		return ATL1_ERR_PHY_RES;
-
-	switch (phy_data & MII_AT001_PSSR_SPEED) {
-	case MII_AT001_PSSR_1000MBS:
-		*speed = SPEED_1000;
-		break;
-	case MII_AT001_PSSR_100MBS:
-		*speed = SPEED_100;
-		break;
-	case MII_AT001_PSSR_10MBS:
-		*speed = SPEED_10;
-		break;
-	default:
-		dev_dbg(&pdev->dev, "error getting speed\n");
-		return ATL1_ERR_PHY_SPEED;
-		break;
-	}
-	if (phy_data & MII_AT001_PSSR_DPLX)
-		*duplex = FULL_DUPLEX;
-	else
-		*duplex = HALF_DUPLEX;
-
-	return ATL1_SUCCESS;
-}
-
-void atl1_set_mac_addr(struct atl1_hw *hw)
-{
-	u32 value;
-	/*
-	 * 00-0B-6A-F6-00-DC
-	 * 0:  6AF600DC   1: 000B
-	 * low dword
-	 */
-	value = (((u32) hw->mac_addr[2]) << 24) |
-	    (((u32) hw->mac_addr[3]) << 16) |
-	    (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5]));
-	iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
-	/* high dword */
-	value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
-	iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2));
-}
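
The hardware code removed above documents the multicast filter scheme: atl1_hash_mc_addr() takes the little-endian CRC-32 of the address and reverses its bit order, and atl1_hash_set() then uses the top bit of that hash to pick one of the two 32-bit hash registers and bits 30:26 to pick the bit inside it. The standalone sketch below reproduces only that index math; crc32_le() here is a plain bitwise CRC-32 stand-in for the kernel's ether_crc_le(), and the sample address is arbitrary.

#include <stdint.h>
#include <stdio.h>

/* Bitwise little-endian CRC-32 (init ~0, no final inversion), used as a
 * stand-in for the kernel's ether_crc_le(). */
static uint32_t crc32_le(const uint8_t *buf, int len)
{
	uint32_t crc = 0xFFFFFFFF;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ (0xEDB88320 & -(crc & 1));
	}
	return crc;
}

/* Mirror atl1_hash_mc_addr(): CRC the address, then reverse the 32 bits. */
static uint32_t demo_hash_mc_addr(const uint8_t mc_addr[6])
{
	uint32_t crc = crc32_le(mc_addr, 6);
	uint32_t value = 0;
	int i;

	for (i = 0; i < 32; i++)
		value |= ((crc >> i) & 1) << (31 - i);
	return value;
}

int main(void)
{
	const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t hash = demo_hash_mc_addr(mc);

	/* Same selection as atl1_hash_set(): the top bit picks one of the
	 * two 32-bit hash registers, bits 30:26 pick the bit inside it. */
	unsigned int reg = (hash >> 31) & 0x1;
	unsigned int bit = (hash >> 26) & 0x1F;

	printf("hash=0x%08x -> register %u, bit %u\n",
	       (unsigned int)hash, reg, bit);
	return 0;
}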

+ 0 - 946
drivers/net/atl1/atl1_hw.h

@@ -1,946 +0,0 @@
-/*
- * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
- * Copyright(c) 2006 Chris Snook <csnook@redhat.com>
- * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
- *
- * Derived from Intel e1000 driver
- * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- *
- * There are a lot of defines in here that are unused and/or have cryptic
- * names.  Please leave them alone, as they're the closest thing we have
- * to a spec from Attansic at present. *ahem* -- CHS
- */
-
-#ifndef _ATL1_HW_H_
-#define _ATL1_HW_H_
-
-#include <linux/types.h>
-#include <linux/mii.h>
-
-struct atl1_adapter;
-struct atl1_hw;
-
-/* function prototypes needed by multiple files */
-s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw);
-s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data);
-s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex);
-s32 atl1_read_mac_addr(struct atl1_hw *hw);
-s32 atl1_init_hw(struct atl1_hw *hw);
-s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex);
-s32 atl1_set_speed_and_duplex(struct atl1_hw *hw, u16 speed, u16 duplex);
-u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
-void atl1_hash_set(struct atl1_hw *hw, u32 hash_value);
-s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data);
-void atl1_set_mac_addr(struct atl1_hw *hw);
-s32 atl1_phy_enter_power_saving(struct atl1_hw *hw);
-s32 atl1_reset_hw(struct atl1_hw *hw);
-void atl1_check_options(struct atl1_adapter *adapter);
-
-/* register definitions */
-#define REG_PCIE_CAP_LIST			0x58
-
-#define REG_VPD_CAP				0x6C
-#define VPD_CAP_ID_MASK				0xff
-#define VPD_CAP_ID_SHIFT			0
-#define VPD_CAP_NEXT_PTR_MASK			0xFF
-#define VPD_CAP_NEXT_PTR_SHIFT			8
-#define VPD_CAP_VPD_ADDR_MASK			0x7FFF
-#define VPD_CAP_VPD_ADDR_SHIFT			16
-#define VPD_CAP_VPD_FLAG			0x80000000
-
-#define REG_VPD_DATA				0x70
-
-#define REG_SPI_FLASH_CTRL			0x200
-#define SPI_FLASH_CTRL_STS_NON_RDY		0x1
-#define SPI_FLASH_CTRL_STS_WEN			0x2
-#define SPI_FLASH_CTRL_STS_WPEN			0x80
-#define SPI_FLASH_CTRL_DEV_STS_MASK		0xFF
-#define SPI_FLASH_CTRL_DEV_STS_SHIFT		0
-#define SPI_FLASH_CTRL_INS_MASK			0x7
-#define SPI_FLASH_CTRL_INS_SHIFT		8
-#define SPI_FLASH_CTRL_START			0x800
-#define SPI_FLASH_CTRL_EN_VPD			0x2000
-#define SPI_FLASH_CTRL_LDSTART			0x8000
-#define SPI_FLASH_CTRL_CS_HI_MASK		0x3
-#define SPI_FLASH_CTRL_CS_HI_SHIFT		16
-#define SPI_FLASH_CTRL_CS_HOLD_MASK		0x3
-#define SPI_FLASH_CTRL_CS_HOLD_SHIFT		18
-#define SPI_FLASH_CTRL_CLK_LO_MASK		0x3
-#define SPI_FLASH_CTRL_CLK_LO_SHIFT		20
-#define SPI_FLASH_CTRL_CLK_HI_MASK		0x3
-#define SPI_FLASH_CTRL_CLK_HI_SHIFT		22
-#define SPI_FLASH_CTRL_CS_SETUP_MASK		0x3
-#define SPI_FLASH_CTRL_CS_SETUP_SHIFT		24
-#define SPI_FLASH_CTRL_EROM_PGSZ_MASK		0x3
-#define SPI_FLASH_CTRL_EROM_PGSZ_SHIFT		26
-#define SPI_FLASH_CTRL_WAIT_READY		0x10000000
-
-#define REG_SPI_ADDR				0x204
-
-#define REG_SPI_DATA				0x208
-
-#define REG_SPI_FLASH_CONFIG			0x20C
-#define SPI_FLASH_CONFIG_LD_ADDR_MASK		0xFFFFFF
-#define SPI_FLASH_CONFIG_LD_ADDR_SHIFT		0
-#define SPI_FLASH_CONFIG_VPD_ADDR_MASK		0x3
-#define SPI_FLASH_CONFIG_VPD_ADDR_SHIFT		24
-#define SPI_FLASH_CONFIG_LD_EXIST		0x4000000
-
-#define REG_SPI_FLASH_OP_PROGRAM		0x210
-#define REG_SPI_FLASH_OP_SC_ERASE		0x211
-#define REG_SPI_FLASH_OP_CHIP_ERASE		0x212
-#define REG_SPI_FLASH_OP_RDID			0x213
-#define REG_SPI_FLASH_OP_WREN			0x214
-#define REG_SPI_FLASH_OP_RDSR			0x215
-#define REG_SPI_FLASH_OP_WRSR			0x216
-#define REG_SPI_FLASH_OP_READ			0x217
-
-#define REG_TWSI_CTRL				0x218
-#define TWSI_CTRL_LD_OFFSET_MASK		0xFF
-#define TWSI_CTRL_LD_OFFSET_SHIFT		0
-#define TWSI_CTRL_LD_SLV_ADDR_MASK		0x7
-#define TWSI_CTRL_LD_SLV_ADDR_SHIFT		8
-#define TWSI_CTRL_SW_LDSTART			0x800
-#define TWSI_CTRL_HW_LDSTART			0x1000
-#define TWSI_CTRL_SMB_SLV_ADDR_MASK		0x7F
-#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT		15
-#define TWSI_CTRL_LD_EXIST			0x400000
-#define TWSI_CTRL_READ_FREQ_SEL_MASK		0x3
-#define TWSI_CTRL_READ_FREQ_SEL_SHIFT		23
-#define TWSI_CTRL_FREQ_SEL_100K			0
-#define TWSI_CTRL_FREQ_SEL_200K			1
-#define TWSI_CTRL_FREQ_SEL_300K			2
-#define TWSI_CTRL_FREQ_SEL_400K			3
-#define TWSI_CTRL_SMB_SLV_ADDR
-#define TWSI_CTRL_WRITE_FREQ_SEL_MASK		0x3
-#define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT		24
-
-#define REG_PCIE_DEV_MISC_CTRL			0x21C
-#define PCIE_DEV_MISC_CTRL_EXT_PIPE		0x2
-#define PCIE_DEV_MISC_CTRL_RETRY_BUFDIS		0x1
-#define PCIE_DEV_MISC_CTRL_SPIROM_EXIST		0x4
-#define PCIE_DEV_MISC_CTRL_SERDES_ENDIAN	0x8
-#define PCIE_DEV_MISC_CTRL_SERDES_SEL_DIN	0x10
-
-/* Selene Master Control Register */
-#define REG_MASTER_CTRL				0x1400
-#define MASTER_CTRL_SOFT_RST			0x1
-#define MASTER_CTRL_MTIMER_EN			0x2
-#define MASTER_CTRL_ITIMER_EN			0x4
-#define MASTER_CTRL_MANUAL_INT			0x8
-#define MASTER_CTRL_REV_NUM_SHIFT		16
-#define MASTER_CTRL_REV_NUM_MASK		0xff
-#define MASTER_CTRL_DEV_ID_SHIFT		24
-#define MASTER_CTRL_DEV_ID_MASK			0xff
-
-/* Timer Initial Value Register */
-#define REG_MANUAL_TIMER_INIT			0x1404
-
-/* IRQ ModeratorTimer Initial Value Register */
-#define REG_IRQ_MODU_TIMER_INIT			0x1408
-
-#define REG_GPHY_ENABLE				0x140C
-
-/* IRQ Anti-Lost Timer Initial Value Register */
-#define REG_CMBDISDMA_TIMER			0x140E
-
-/* Block IDLE Status Register */
-#define REG_IDLE_STATUS				0x1410
-#define IDLE_STATUS_RXMAC			1
-#define IDLE_STATUS_TXMAC			2
-#define IDLE_STATUS_RXQ				4
-#define IDLE_STATUS_TXQ				8
-#define IDLE_STATUS_DMAR			0x10
-#define IDLE_STATUS_DMAW			0x20
-#define IDLE_STATUS_SMB				0x40
-#define IDLE_STATUS_CMB				0x80
-
-/* MDIO Control Register */
-#define REG_MDIO_CTRL				0x1414
-#define MDIO_DATA_MASK				0xffff
-#define MDIO_DATA_SHIFT				0
-#define MDIO_REG_ADDR_MASK			0x1f
-#define MDIO_REG_ADDR_SHIFT			16
-#define MDIO_RW					0x200000
-#define MDIO_SUP_PREAMBLE			0x400000
-#define MDIO_START				0x800000
-#define MDIO_CLK_SEL_SHIFT			24
-#define MDIO_CLK_25_4				0
-#define MDIO_CLK_25_6				2
-#define MDIO_CLK_25_8				3
-#define MDIO_CLK_25_10				4
-#define MDIO_CLK_25_14				5
-#define MDIO_CLK_25_20				6
-#define MDIO_CLK_25_28				7
-#define MDIO_BUSY				0x8000000
-#define MDIO_WAIT_TIMES				30
-
-/* MII PHY Status Register */
-#define REG_PHY_STATUS				0x1418
-
-/* BIST Control and Status Register0 (for the Packet Memory) */
-#define REG_BIST0_CTRL				0x141c
-#define BIST0_NOW				0x1
-#define BIST0_SRAM_FAIL				0x2
-#define BIST0_FUSE_FLAG				0x4
-#define REG_BIST1_CTRL				0x1420
-#define BIST1_NOW				0x1
-#define BIST1_SRAM_FAIL				0x2
-#define BIST1_FUSE_FLAG				0x4
-
-/* MAC Control Register */
-#define REG_MAC_CTRL				0x1480
-#define MAC_CTRL_TX_EN				1
-#define MAC_CTRL_RX_EN				2
-#define MAC_CTRL_TX_FLOW			4
-#define MAC_CTRL_RX_FLOW			8
-#define MAC_CTRL_LOOPBACK			0x10
-#define MAC_CTRL_DUPLX				0x20
-#define MAC_CTRL_ADD_CRC			0x40
-#define MAC_CTRL_PAD				0x80
-#define MAC_CTRL_LENCHK				0x100
-#define MAC_CTRL_HUGE_EN			0x200
-#define MAC_CTRL_PRMLEN_SHIFT			10
-#define MAC_CTRL_PRMLEN_MASK			0xf
-#define MAC_CTRL_RMV_VLAN			0x4000
-#define MAC_CTRL_PROMIS_EN			0x8000
-#define MAC_CTRL_TX_PAUSE			0x10000
-#define MAC_CTRL_SCNT				0x20000
-#define MAC_CTRL_SRST_TX			0x40000
-#define MAC_CTRL_TX_SIMURST			0x80000
-#define MAC_CTRL_SPEED_SHIFT			20
-#define MAC_CTRL_SPEED_MASK			0x300000
-#define MAC_CTRL_SPEED_1000			2
-#define MAC_CTRL_SPEED_10_100			1
-#define MAC_CTRL_DBG_TX_BKPRESURE		0x400000
-#define MAC_CTRL_TX_HUGE			0x800000
-#define MAC_CTRL_RX_CHKSUM_EN			0x1000000
-#define MAC_CTRL_MC_ALL_EN			0x2000000
-#define MAC_CTRL_BC_EN				0x4000000
-#define MAC_CTRL_DBG				0x8000000
-
-/* MAC IPG/IFG Control Register */
-#define REG_MAC_IPG_IFG				0x1484
-#define MAC_IPG_IFG_IPGT_SHIFT			0
-#define MAC_IPG_IFG_IPGT_MASK			0x7f
-#define MAC_IPG_IFG_MIFG_SHIFT			8
-#define MAC_IPG_IFG_MIFG_MASK			0xff
-#define MAC_IPG_IFG_IPGR1_SHIFT			16
-#define MAC_IPG_IFG_IPGR1_MASK			0x7f
-#define MAC_IPG_IFG_IPGR2_SHIFT			24
-#define MAC_IPG_IFG_IPGR2_MASK			0x7f
-
-/* MAC STATION ADDRESS */
-#define REG_MAC_STA_ADDR			0x1488
-
-/* Hash table for multicast address */
-#define REG_RX_HASH_TABLE			0x1490
-
-/* MAC Half-Duplex Control Register */
-#define REG_MAC_HALF_DUPLX_CTRL			0x1498
-#define MAC_HALF_DUPLX_CTRL_LCOL_SHIFT		0
-#define MAC_HALF_DUPLX_CTRL_LCOL_MASK		0x3ff
-#define MAC_HALF_DUPLX_CTRL_RETRY_SHIFT		12
-#define MAC_HALF_DUPLX_CTRL_RETRY_MASK		0xf
-#define MAC_HALF_DUPLX_CTRL_EXC_DEF_EN		0x10000
-#define MAC_HALF_DUPLX_CTRL_NO_BACK_C		0x20000
-#define MAC_HALF_DUPLX_CTRL_NO_BACK_P		0x40000
-#define MAC_HALF_DUPLX_CTRL_ABEBE		0x80000
-#define MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT		20
-#define MAC_HALF_DUPLX_CTRL_ABEBT_MASK		0xf
-#define MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT	24
-#define MAC_HALF_DUPLX_CTRL_JAMIPG_MASK		0xf
-
-/* Maximum Frame Length Control Register */
-#define REG_MTU					0x149c
-
-/* Wake-On-Lan control register */
-#define REG_WOL_CTRL				0x14a0
-#define WOL_PATTERN_EN				0x00000001
-#define WOL_PATTERN_PME_EN			0x00000002
-#define WOL_MAGIC_EN				0x00000004
-#define WOL_MAGIC_PME_EN			0x00000008
-#define WOL_LINK_CHG_EN				0x00000010
-#define WOL_LINK_CHG_PME_EN			0x00000020
-#define WOL_PATTERN_ST				0x00000100
-#define WOL_MAGIC_ST				0x00000200
-#define WOL_LINKCHG_ST				0x00000400
-#define WOL_CLK_SWITCH_EN			0x00008000
-#define WOL_PT0_EN				0x00010000
-#define WOL_PT1_EN				0x00020000
-#define WOL_PT2_EN				0x00040000
-#define WOL_PT3_EN				0x00080000
-#define WOL_PT4_EN				0x00100000
-#define WOL_PT5_EN				0x00200000
-#define WOL_PT6_EN				0x00400000
-
-/* WOL Length ( 2 DWORD ) */
-#define REG_WOL_PATTERN_LEN			0x14a4
-#define WOL_PT_LEN_MASK				0x7f
-#define WOL_PT0_LEN_SHIFT			0
-#define WOL_PT1_LEN_SHIFT			8
-#define WOL_PT2_LEN_SHIFT			16
-#define WOL_PT3_LEN_SHIFT			24
-#define WOL_PT4_LEN_SHIFT			0
-#define WOL_PT5_LEN_SHIFT			8
-#define WOL_PT6_LEN_SHIFT			16
-
-/* Internal SRAM Partition Register */
-#define REG_SRAM_RFD_ADDR			0x1500
-#define REG_SRAM_RFD_LEN			(REG_SRAM_RFD_ADDR+ 4)
-#define REG_SRAM_RRD_ADDR			(REG_SRAM_RFD_ADDR+ 8)
-#define REG_SRAM_RRD_LEN			(REG_SRAM_RFD_ADDR+12)
-#define REG_SRAM_TPD_ADDR			(REG_SRAM_RFD_ADDR+16)
-#define REG_SRAM_TPD_LEN			(REG_SRAM_RFD_ADDR+20)
-#define REG_SRAM_TRD_ADDR			(REG_SRAM_RFD_ADDR+24)
-#define REG_SRAM_TRD_LEN			(REG_SRAM_RFD_ADDR+28)
-#define REG_SRAM_RXF_ADDR			(REG_SRAM_RFD_ADDR+32)
-#define REG_SRAM_RXF_LEN			(REG_SRAM_RFD_ADDR+36)
-#define REG_SRAM_TXF_ADDR			(REG_SRAM_RFD_ADDR+40)
-#define REG_SRAM_TXF_LEN			(REG_SRAM_RFD_ADDR+44)
-#define REG_SRAM_TCPH_PATH_ADDR			(REG_SRAM_RFD_ADDR+48)
-#define SRAM_TCPH_ADDR_MASK			0x0fff
-#define SRAM_TCPH_ADDR_SHIFT			0
-#define SRAM_PATH_ADDR_MASK			0x0fff
-#define SRAM_PATH_ADDR_SHIFT			16
-
-/* Load Ptr Register */
-#define REG_LOAD_PTR				(REG_SRAM_RFD_ADDR+52)
-
-/* Descriptor Control register */
-#define REG_DESC_BASE_ADDR_HI			0x1540
-#define REG_DESC_RFD_ADDR_LO			(REG_DESC_BASE_ADDR_HI+4)
-#define REG_DESC_RRD_ADDR_LO			(REG_DESC_BASE_ADDR_HI+8)
-#define REG_DESC_TPD_ADDR_LO			(REG_DESC_BASE_ADDR_HI+12)
-#define REG_DESC_CMB_ADDR_LO			(REG_DESC_BASE_ADDR_HI+16)
-#define REG_DESC_SMB_ADDR_LO			(REG_DESC_BASE_ADDR_HI+20)
-#define REG_DESC_RFD_RRD_RING_SIZE		(REG_DESC_BASE_ADDR_HI+24)
-#define DESC_RFD_RING_SIZE_MASK			0x7ff
-#define DESC_RFD_RING_SIZE_SHIFT		0
-#define DESC_RRD_RING_SIZE_MASK			0x7ff
-#define DESC_RRD_RING_SIZE_SHIFT		16
-#define REG_DESC_TPD_RING_SIZE			(REG_DESC_BASE_ADDR_HI+28)
-#define DESC_TPD_RING_SIZE_MASK			0x3ff
-#define DESC_TPD_RING_SIZE_SHIFT		0
-
-/* TXQ Control Register */
-#define REG_TXQ_CTRL				0x1580
-#define TXQ_CTRL_TPD_BURST_NUM_SHIFT		0
-#define TXQ_CTRL_TPD_BURST_NUM_MASK		0x1f
-#define TXQ_CTRL_EN				0x20
-#define TXQ_CTRL_ENH_MODE			0x40
-#define TXQ_CTRL_TPD_FETCH_TH_SHIFT		8
-#define TXQ_CTRL_TPD_FETCH_TH_MASK		0x3f
-#define TXQ_CTRL_TXF_BURST_NUM_SHIFT		16
-#define TXQ_CTRL_TXF_BURST_NUM_MASK		0xffff
-
-/* Jumbo packet Threshold for task offload */
-#define REG_TX_JUMBO_TASK_TH_TPD_IPG		0x1584
-#define TX_JUMBO_TASK_TH_MASK			0x7ff
-#define TX_JUMBO_TASK_TH_SHIFT			0
-#define TX_TPD_MIN_IPG_MASK			0x1f
-#define TX_TPD_MIN_IPG_SHIFT			16
-
-/* RXQ Control Register */
-#define REG_RXQ_CTRL				0x15a0
-#define RXQ_CTRL_RFD_BURST_NUM_SHIFT		0
-#define RXQ_CTRL_RFD_BURST_NUM_MASK		0xff
-#define RXQ_CTRL_RRD_BURST_THRESH_SHIFT		8
-#define RXQ_CTRL_RRD_BURST_THRESH_MASK		0xff
-#define RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT		16
-#define RXQ_CTRL_RFD_PREF_MIN_IPG_MASK		0x1f
-#define RXQ_CTRL_CUT_THRU_EN			0x40000000
-#define RXQ_CTRL_EN				0x80000000
-
-/* Rx jumbo packet threshold and rrd  retirement timer */
-#define REG_RXQ_JMBOSZ_RRDTIM			(REG_RXQ_CTRL+ 4)
-#define RXQ_JMBOSZ_TH_MASK			0x7ff
-#define RXQ_JMBOSZ_TH_SHIFT			0
-#define RXQ_JMBO_LKAH_MASK			0xf
-#define RXQ_JMBO_LKAH_SHIFT			11
-#define RXQ_RRD_TIMER_MASK			0xffff
-#define RXQ_RRD_TIMER_SHIFT			16
-
-/* RFD flow control register */
-#define REG_RXQ_RXF_PAUSE_THRESH		(REG_RXQ_CTRL+ 8)
-#define RXQ_RXF_PAUSE_TH_HI_SHIFT		16
-#define RXQ_RXF_PAUSE_TH_HI_MASK		0xfff
-#define RXQ_RXF_PAUSE_TH_LO_SHIFT		0
-#define RXQ_RXF_PAUSE_TH_LO_MASK		0xfff
-
-/* RRD flow control register */
-#define REG_RXQ_RRD_PAUSE_THRESH		(REG_RXQ_CTRL+12)
-#define RXQ_RRD_PAUSE_TH_HI_SHIFT		0
-#define RXQ_RRD_PAUSE_TH_HI_MASK		0xfff
-#define RXQ_RRD_PAUSE_TH_LO_SHIFT		16
-#define RXQ_RRD_PAUSE_TH_LO_MASK		0xfff
-
-/* DMA Engine Control Register */
-#define REG_DMA_CTRL				0x15c0
-#define DMA_CTRL_DMAR_IN_ORDER			0x1
-#define DMA_CTRL_DMAR_ENH_ORDER			0x2
-#define DMA_CTRL_DMAR_OUT_ORDER			0x4
-#define DMA_CTRL_RCB_VALUE			0x8
-#define DMA_CTRL_DMAR_BURST_LEN_SHIFT		4
-#define DMA_CTRL_DMAR_BURST_LEN_MASK		7
-#define DMA_CTRL_DMAW_BURST_LEN_SHIFT		7
-#define DMA_CTRL_DMAW_BURST_LEN_MASK		7
-#define DMA_CTRL_DMAR_EN				0x400
-#define DMA_CTRL_DMAW_EN				0x800
-
-/* CMB/SMB Control Register */
-#define REG_CSMB_CTRL				0x15d0
-#define CSMB_CTRL_CMB_NOW			1
-#define CSMB_CTRL_SMB_NOW			2
-#define CSMB_CTRL_CMB_EN			4
-#define CSMB_CTRL_SMB_EN			8
-
-/* CMB DMA Write Threshold Register */
-#define REG_CMB_WRITE_TH			(REG_CSMB_CTRL+ 4)
-#define CMB_RRD_TH_SHIFT			0
-#define CMB_RRD_TH_MASK				0x7ff
-#define CMB_TPD_TH_SHIFT			16
-#define CMB_TPD_TH_MASK				0x7ff
-
-/* RX/TX count-down timer to trigger CMB-write. 2us resolution. */
-#define REG_CMB_WRITE_TIMER			(REG_CSMB_CTRL+ 8)
-#define CMB_RX_TM_SHIFT				0
-#define CMB_RX_TM_MASK				0xffff
-#define CMB_TX_TM_SHIFT				16
-#define CMB_TX_TM_MASK				0xffff
-
-/* Number of packet received since last CMB write */
-#define REG_CMB_RX_PKT_CNT			(REG_CSMB_CTRL+12)
-
-/* Number of packet transmitted since last CMB write */
-#define REG_CMB_TX_PKT_CNT			(REG_CSMB_CTRL+16)
-
-/* SMB auto DMA timer register */
-#define REG_SMB_TIMER				(REG_CSMB_CTRL+20)
-
-/* Mailbox Register */
-#define REG_MAILBOX				0x15f0
-#define MB_RFD_PROD_INDX_SHIFT			0
-#define MB_RFD_PROD_INDX_MASK			0x7ff
-#define MB_RRD_CONS_INDX_SHIFT			11
-#define MB_RRD_CONS_INDX_MASK			0x7ff
-#define MB_TPD_PROD_INDX_SHIFT			22
-#define MB_TPD_PROD_INDX_MASK			0x3ff
-
-/* Interrupt Status Register */
-#define REG_ISR					0x1600
-#define ISR_SMB					1
-#define ISR_TIMER				2
-#define ISR_MANUAL				4
-#define ISR_RXF_OV				8
-#define ISR_RFD_UNRUN				0x10
-#define ISR_RRD_OV				0x20
-#define ISR_TXF_UNRUN				0x40
-#define ISR_LINK				0x80
-#define ISR_HOST_RFD_UNRUN			0x100
-#define ISR_HOST_RRD_OV				0x200
-#define ISR_DMAR_TO_RST				0x400
-#define ISR_DMAW_TO_RST				0x800
-#define ISR_GPHY				0x1000
-#define ISR_RX_PKT				0x10000
-#define ISR_TX_PKT				0x20000
-#define ISR_TX_DMA				0x40000
-#define ISR_RX_DMA				0x80000
-#define ISR_CMB_RX				0x100000
-#define ISR_CMB_TX				0x200000
-#define ISR_MAC_RX				0x400000
-#define ISR_MAC_TX				0x800000
-#define ISR_UR_DETECTED				0x1000000
-#define ISR_FERR_DETECTED			0x2000000
-#define ISR_NFERR_DETECTED			0x4000000
-#define ISR_CERR_DETECTED			0x8000000
-#define ISR_PHY_LINKDOWN			0x10000000
-#define ISR_DIS_SMB				0x20000000
-#define ISR_DIS_DMA				0x40000000
-#define ISR_DIS_INT				0x80000000
-
-/* Interrupt Mask Register */
-#define REG_IMR					0x1604
-
-/* Normal Interrupt mask  */
-#define IMR_NORMAL_MASK	(\
-	ISR_SMB		|\
-	ISR_GPHY	|\
-	ISR_PHY_LINKDOWN|\
-	ISR_DMAR_TO_RST	|\
-	ISR_DMAW_TO_RST	|\
-	ISR_CMB_TX	|\
-	ISR_CMB_RX	)
-
-/* Debug Interrupt Mask (enables all interrupts) */
-#define IMR_DEBUG_MASK	(\
-	ISR_SMB		|\
-	ISR_TIMER	|\
-	ISR_MANUAL	|\
-	ISR_RXF_OV	|\
-	ISR_RFD_UNRUN	|\
-	ISR_RRD_OV	|\
-	ISR_TXF_UNRUN	|\
-	ISR_LINK	|\
-	ISR_CMB_TX	|\
-	ISR_CMB_RX	|\
-	ISR_RX_PKT	|\
-	ISR_TX_PKT	|\
-	ISR_MAC_RX	|\
-	ISR_MAC_TX	)
-
-/* Interrupt Status Register */
-#define REG_RFD_RRD_IDX				0x1800
-#define REG_TPD_IDX				0x1804
-
-/*  MII definition */
-/* PHY Common Register */
-#define MII_AT001_CR					0x09
-#define MII_AT001_SR					0x0A
-#define MII_AT001_ESR					0x0F
-#define MII_AT001_PSCR					0x10
-#define MII_AT001_PSSR					0x11
-
-/* PHY Control Register */
-#define MII_CR_SPEED_SELECT_MSB				0x0040	/* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_COLL_TEST_ENABLE				0x0080	/* Collision test enable */
-#define MII_CR_FULL_DUPLEX				0x0100	/* FDX =1, half duplex =0 */
-#define MII_CR_RESTART_AUTO_NEG				0x0200	/* Restart auto negotiation */
-#define MII_CR_ISOLATE					0x0400	/* Isolate PHY from MII */
-#define MII_CR_POWER_DOWN				0x0800	/* Power down */
-#define MII_CR_AUTO_NEG_EN				0x1000	/* Auto Neg Enable */
-#define MII_CR_SPEED_SELECT_LSB				0x2000	/* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_LOOPBACK					0x4000	/* 0 = normal, 1 = loopback */
-#define MII_CR_RESET					0x8000	/* 0 = normal, 1 = PHY reset */
-#define MII_CR_SPEED_MASK				0x2040
-#define MII_CR_SPEED_1000				0x0040
-#define MII_CR_SPEED_100				0x2000
-#define MII_CR_SPEED_10					0x0000
-
-/* PHY Status Register */
-#define MII_SR_EXTENDED_CAPS				0x0001	/* Extended register capabilities */
-#define MII_SR_JABBER_DETECT				0x0002	/* Jabber Detected */
-#define MII_SR_LINK_STATUS				0x0004	/* Link Status 1 = link */
-#define MII_SR_AUTONEG_CAPS				0x0008	/* Auto Neg Capable */
-#define MII_SR_REMOTE_FAULT				0x0010	/* Remote Fault Detect */
-#define MII_SR_AUTONEG_COMPLETE				0x0020	/* Auto Neg Complete */
-#define MII_SR_PREAMBLE_SUPPRESS			0x0040	/* Preamble may be suppressed */
-#define MII_SR_EXTENDED_STATUS				0x0100	/* Ext. status info in Reg 0x0F */
-#define MII_SR_100T2_HD_CAPS				0x0200	/* 100T2 Half Duplex Capable */
-#define MII_SR_100T2_FD_CAPS				0x0400	/* 100T2 Full Duplex Capable */
-#define MII_SR_10T_HD_CAPS				0x0800	/* 10T   Half Duplex Capable */
-#define MII_SR_10T_FD_CAPS				0x1000	/* 10T   Full Duplex Capable */
-#define MII_SR_100X_HD_CAPS				0x2000	/* 100X  Half Duplex Capable */
-#define MII_SR_100X_FD_CAPS				0x4000	/* 100X  Full Duplex Capable */
-#define MII_SR_100T4_CAPS				0x8000	/* 100T4 Capable */
-
-/* Link partner ability register. */
-#define MII_LPA_SLCT					0x001f	/* Same as advertise selector  */
-#define MII_LPA_10HALF					0x0020	/* Can do 10mbps half-duplex   */
-#define MII_LPA_10FULL					0x0040	/* Can do 10mbps full-duplex   */
-#define MII_LPA_100HALF					0x0080	/* Can do 100mbps half-duplex  */
-#define MII_LPA_100FULL					0x0100	/* Can do 100mbps full-duplex  */
-#define MII_LPA_100BASE4				0x0200	/* 100BASE-T4  */
-#define MII_LPA_PAUSE					0x0400	/* PAUSE */
-#define MII_LPA_ASYPAUSE				0x0800	/* Asymmetrical PAUSE */
-#define MII_LPA_RFAULT					0x2000	/* Link partner faulted        */
-#define MII_LPA_LPACK					0x4000	/* Link partner acked us       */
-#define MII_LPA_NPAGE					0x8000	/* Next page bit               */
-
-/* Autoneg Advertisement Register */
-#define MII_AR_SELECTOR_FIELD				0x0001	/* indicates IEEE 802.3 CSMA/CD */
-#define MII_AR_10T_HD_CAPS				0x0020	/* 10T   Half Duplex Capable */
-#define MII_AR_10T_FD_CAPS				0x0040	/* 10T   Full Duplex Capable */
-#define MII_AR_100TX_HD_CAPS				0x0080	/* 100TX Half Duplex Capable */
-#define MII_AR_100TX_FD_CAPS				0x0100	/* 100TX Full Duplex Capable */
-#define MII_AR_100T4_CAPS				0x0200	/* 100T4 Capable */
-#define MII_AR_PAUSE					0x0400	/* Pause operation desired */
-#define MII_AR_ASM_DIR					0x0800	/* Asymmetric Pause Direction bit */
-#define MII_AR_REMOTE_FAULT				0x2000	/* Remote Fault detected */
-#define MII_AR_NEXT_PAGE				0x8000	/* Next Page ability supported */
-#define MII_AR_SPEED_MASK				0x01E0
-#define MII_AR_DEFAULT_CAP_MASK				0x0DE0
-
-/* 1000BASE-T Control Register */
-#define MII_AT001_CR_1000T_HD_CAPS			0x0100	/* Advertise 1000T HD capability */
-#define MII_AT001_CR_1000T_FD_CAPS			0x0200	/* Advertise 1000T FD capability  */
-#define MII_AT001_CR_1000T_REPEATER_DTE			0x0400	/* 1=Repeater/switch device port, 0=DTE device */
-#define MII_AT001_CR_1000T_MS_VALUE			0x0800	/* 1=Configure PHY as Master, 0=Configure PHY as Slave */
-#define MII_AT001_CR_1000T_MS_ENABLE			0x1000	/* 1=Master/Slave manual config value, 0=Automatic Master/Slave config */
-#define MII_AT001_CR_1000T_TEST_MODE_NORMAL		0x0000	/* Normal Operation */
-#define MII_AT001_CR_1000T_TEST_MODE_1			0x2000	/* Transmit Waveform test */
-#define MII_AT001_CR_1000T_TEST_MODE_2			0x4000	/* Master Transmit Jitter test */
-#define MII_AT001_CR_1000T_TEST_MODE_3			0x6000	/* Slave Transmit Jitter test */
-#define MII_AT001_CR_1000T_TEST_MODE_4			0x8000	/* Transmitter Distortion test */
-#define MII_AT001_CR_1000T_SPEED_MASK			0x0300
-#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK		0x0300
-
-/* 1000BASE-T Status Register */
-#define MII_AT001_SR_1000T_LP_HD_CAPS			0x0400	/* LP is 1000T HD capable */
-#define MII_AT001_SR_1000T_LP_FD_CAPS			0x0800	/* LP is 1000T FD capable */
-#define MII_AT001_SR_1000T_REMOTE_RX_STATUS		0x1000	/* Remote receiver OK */
-#define MII_AT001_SR_1000T_LOCAL_RX_STATUS		0x2000	/* Local receiver OK */
-#define MII_AT001_SR_1000T_MS_CONFIG_RES		0x4000	/* 1=Local TX is Master, 0=Slave */
-#define MII_AT001_SR_1000T_MS_CONFIG_FAULT		0x8000	/* Master/Slave config fault */
-#define MII_AT001_SR_1000T_REMOTE_RX_STATUS_SHIFT	12
-#define MII_AT001_SR_1000T_LOCAL_RX_STATUS_SHIFT	13
-
-/* Extended Status Register */
-#define MII_AT001_ESR_1000T_HD_CAPS			0x1000	/* 1000T HD capable */
-#define MII_AT001_ESR_1000T_FD_CAPS			0x2000	/* 1000T FD capable */
-#define MII_AT001_ESR_1000X_HD_CAPS			0x4000	/* 1000X HD capable */
-#define MII_AT001_ESR_1000X_FD_CAPS			0x8000	/* 1000X FD capable */
-
-/* AT001 PHY Specific Control Register */
-#define MII_AT001_PSCR_JABBER_DISABLE			0x0001	/* 1=Jabber Function disabled */
-#define MII_AT001_PSCR_POLARITY_REVERSAL		0x0002	/* 1=Polarity Reversal enabled */
-#define MII_AT001_PSCR_SQE_TEST				0x0004	/* 1=SQE Test enabled */
-#define MII_AT001_PSCR_MAC_POWERDOWN			0x0008
-#define MII_AT001_PSCR_CLK125_DISABLE			0x0010	/* 1=CLK125 low, 0=CLK125 toggling */
-#define MII_AT001_PSCR_MDI_MANUAL_MODE			0x0000	/* MDI Crossover Mode bits 6:5, Manual MDI configuration */
-#define MII_AT001_PSCR_MDIX_MANUAL_MODE			0x0020	/* Manual MDIX configuration */
-#define MII_AT001_PSCR_AUTO_X_1000T			0x0040	/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
-#define MII_AT001_PSCR_AUTO_X_MODE			0x0060	/* Auto crossover enabled all speeds. */
-#define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE		0x0080	/* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T RX Threshold), 0=Normal 10BASE-T RX Threshold */
-#define MII_AT001_PSCR_MII_5BIT_ENABLE			0x0100	/* 1=5-Bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */
-#define MII_AT001_PSCR_SCRAMBLER_DISABLE		0x0200	/* 1=Scrambler disable */
-#define MII_AT001_PSCR_FORCE_LINK_GOOD			0x0400	/* 1=Force link good */
-#define MII_AT001_PSCR_ASSERT_CRS_ON_TX			0x0800	/* 1=Assert CRS on Transmit */
-#define MII_AT001_PSCR_POLARITY_REVERSAL_SHIFT		1
-#define MII_AT001_PSCR_AUTO_X_MODE_SHIFT		5
-#define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE_SHIFT	7
-
-/* AT001 PHY Specific Status Register */
-#define MII_AT001_PSSR_SPD_DPLX_RESOLVED		0x0800	/* 1=Speed & Duplex resolved */
-#define MII_AT001_PSSR_DPLX				0x2000	/* 1=Duplex 0=Half Duplex */
-#define MII_AT001_PSSR_SPEED				0xC000	/* Speed, bits 14:15 */
-#define MII_AT001_PSSR_10MBS				0x0000	/* 00=10Mbs */
-#define MII_AT001_PSSR_100MBS				0x4000	/* 01=100Mbs */
-#define MII_AT001_PSSR_1000MBS				0x8000	/* 10=1000Mbs */
-
-/* PCI Command Register Bit Definitions */
-#define PCI_REG_COMMAND					0x04	/* PCI Command Register */
-#define CMD_IO_SPACE					0x0001
-#define CMD_MEMORY_SPACE				0x0002
-#define CMD_BUS_MASTER					0x0004
-
-/* Wake Up Filter Control */
-#define ATL1_WUFC_LNKC	0x00000001	/* Link Status Change Wakeup Enable */
-#define ATL1_WUFC_MAG	0x00000002	/* Magic Packet Wakeup Enable */
-#define ATL1_WUFC_EX	0x00000004	/* Directed Exact Wakeup Enable */
-#define ATL1_WUFC_MC	0x00000008	/* Multicast Wakeup Enable */
-#define ATL1_WUFC_BC	0x00000010	/* Broadcast Wakeup Enable */
-
-/* Error Codes */
-#define ATL1_SUCCESS			0
-#define ATL1_ERR_EEPROM			1
-#define ATL1_ERR_PHY			2
-#define ATL1_ERR_CONFIG			3
-#define ATL1_ERR_PARAM			4
-#define ATL1_ERR_MAC_TYPE		5
-#define ATL1_ERR_PHY_TYPE		6
-#define ATL1_ERR_PHY_SPEED		7
-#define ATL1_ERR_PHY_RES		8
-
-#define SPEED_0		0xffff
-#define SPEED_10	10
-#define SPEED_100	100
-#define SPEED_1000	1000
-#define HALF_DUPLEX	1
-#define FULL_DUPLEX	2
-
-#define MEDIA_TYPE_AUTO_SENSOR	0
-#define MEDIA_TYPE_1000M_FULL	1
-#define MEDIA_TYPE_100M_FULL	2
-#define MEDIA_TYPE_100M_HALF	3
-#define MEDIA_TYPE_10M_FULL	4
-#define MEDIA_TYPE_10M_HALF	5
-
-#define ADVERTISE_10_HALF		0x0001
-#define ADVERTISE_10_FULL		0x0002
-#define ADVERTISE_100_HALF		0x0004
-#define ADVERTISE_100_FULL		0x0008
-#define ADVERTISE_1000_HALF		0x0010
-#define ADVERTISE_1000_FULL		0x0020
-#define AUTONEG_ADVERTISE_SPEED_DEFAULT	0x002F	/* Everything but 1000-Half */
-#define AUTONEG_ADVERTISE_10_100_ALL	0x000F	/* All 10/100 speeds */
-#define AUTONEG_ADVERTISE_10_ALL	0x0003	/* 10Mbps Full & Half speeds */
-
-#define MAX_JUMBO_FRAME_SIZE		0x2800
-
-#define PHY_AUTO_NEG_TIME	45	/* 4.5 Seconds */
-#define PHY_FORCE_TIME		20	/* 2.0 Seconds */
-
-/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA */
-#define EEPROM_SUM		0xBABA
-
-#define ATL1_EEDUMP_LEN		48
-
-/* Statistics counters collected by the MAC */
-struct stats_msg_block {
-	/* rx */
-	u32 rx_ok;		/* The number of good packets received. */
-	u32 rx_bcast;		/* The number of good broadcast packets received. */
-	u32 rx_mcast;		/* The number of good multicast packets received. */
-	u32 rx_pause;		/* The number of Pause packets received. */
-	u32 rx_ctrl;		/* The number of Control packets received other than Pause frames. */
-	u32 rx_fcs_err;		/* The number of packets with bad FCS. */
-	u32 rx_len_err;		/* The number of packets whose length field does not match the actual size. */
-	u32 rx_byte_cnt;	/* The number of bytes of good packets received.  FCS is NOT included. */
-	u32 rx_runt;		/* The number of packets received that are less than 64 bytes long and have good FCS. */
-	u32 rx_frag;		/* The number of packets received that are less than 64 bytes long and have bad FCS. */
-	u32 rx_sz_64;		/* The number of good and bad packets received that are 64 bytes long. */
-	u32 rx_sz_65_127;	/* The number of good and bad packets received that are between 65 and 127 bytes long. */
-	u32 rx_sz_128_255;	/* The number of good and bad packets received that are between 128 and 255 bytes long. */
-	u32 rx_sz_256_511;	/* The number of good and bad packets received that are between 256 and 511 bytes long. */
-	u32 rx_sz_512_1023;	/* The number of good and bad packets received that are between 512 and 1023 bytes long. */
-	u32 rx_sz_1024_1518;	/* The number of good and bad packets received that are between 1024 and 1518 bytes long. */
-	u32 rx_sz_1519_max;	/* The number of good and bad packets received that are between 1519 bytes and the MTU. */
-	u32 rx_sz_ov;		/* The number of good and bad packets received that are larger than the MTU and truncated by Selene. */
-	u32 rx_rxf_ov;		/* The number of frames dropped due to RX FIFO overflow. */
-	u32 rx_rrd_ov;		/* The number of frames dropped due to RRD overflow. */
-	u32 rx_align_err;	/* Alignment errors. */
-	u32 rx_bcast_byte_cnt;	/* The byte count of broadcast packets received, excluding FCS. */
-	u32 rx_mcast_byte_cnt;	/* The byte count of multicast packets received, excluding FCS. */
-	u32 rx_err_addr;	/* The number of packets dropped due to address filtering. */
-
-	/* tx */
-	u32 tx_ok;		/* The number of good packets transmitted. */
-	u32 tx_bcast;		/* The number of good broadcast packets transmitted. */
-	u32 tx_mcast;		/* The number of good multicast packets transmitted. */
-	u32 tx_pause;		/* The number of Pause packets transmitted. */
-	u32 tx_exc_defer;	/* The number of packets transmitted with excessive deferral. */
-	u32 tx_ctrl;		/* The number of control frames transmitted, excluding Pause frames. */
-	u32 tx_defer;		/* The number of packets transmitted that were deferred. */
-	u32 tx_byte_cnt;	/* The number of bytes of data transmitted.  FCS is NOT included. */
-	u32 tx_sz_64;		/* The number of good and bad packets transmitted that are 64 bytes long. */
-	u32 tx_sz_65_127;	/* The number of good and bad packets transmitted that are between 65 and 127 bytes long. */
-	u32 tx_sz_128_255;	/* The number of good and bad packets transmitted that are between 128 and 255 bytes long. */
-	u32 tx_sz_256_511;	/* The number of good and bad packets transmitted that are between 256 and 511 bytes long. */
-	u32 tx_sz_512_1023;	/* The number of good and bad packets transmitted that are between 512 and 1023 bytes long. */
-	u32 tx_sz_1024_1518;	/* The number of good and bad packets transmitted that are between 1024 and 1518 bytes long. */
-	u32 tx_sz_1519_max;	/* The number of good and bad packets transmitted that are between 1519 bytes and the MTU. */
-	u32 tx_1_col;		/* The number of packets transmitted successfully after a single prior collision. */
-	u32 tx_2_col;		/* The number of packets transmitted successfully after multiple prior collisions. */
-	u32 tx_late_col;	/* The number of packets transmitted with late collisions. */
-	u32 tx_abort_col;	/* The number of transmit packets aborted due to excessive collisions. */
-	u32 tx_underrun;	/* The number of transmit packets aborted due to transmit FIFO or TRD FIFO underrun. */
-	u32 tx_rd_eop;		/* The number of times a read went beyond the EOP into the next frame area because the TRD was not written in time. */
-	u32 tx_len_err;		/* The number of transmit packets whose length field does NOT match the actual frame size. */
-	u32 tx_trunc;		/* The number of transmit packets truncated because their size exceeded the MTU. */
-	u32 tx_bcast_byte;	/* The byte count of broadcast packets transmitted, excluding FCS. */
-	u32 tx_mcast_byte;	/* The byte count of multicast packets transmitted, excluding FCS. */
-	u32 smb_updated;	/* 1: SMB updated.  Software uses this as an indication that the statistics
-				 * have been updated and should clear this bit as soon as it retrieves them. */
-};
-
-/* Coalescing Message Block */
-struct coals_msg_block {
-	u32 int_stats;		/* interrupt status */
-	u16 rrd_prod_idx;	/* RRD Producer Index. */
-	u16 rfd_cons_idx;	/* RFD Consumer Index. */
-	u16 update;		/* Selene sets this bit every time it DMAs the CMB to host memory.
-				 * Software is supposed to clear this bit after the CMB information is processed. */
-	u16 tpd_cons_idx;	/* TPD Consumer Index. */
-};
-
-/* RRD descriptor */
-struct rx_return_desc {
-	u8 num_buf;		/* Number of RFD buffers used by the received packet */
-	u8 resved;
-	u16 buf_indx;		/* RFD Index of the first buffer */
-	union {
-		u32 valid;
-		struct {
-			u16 rx_chksum;
-			u16 pkt_size;
-		} xsum_sz;
-	} xsz;
-
-	u16 pkt_flg;		/* Packet flags */
-	u16 err_flg;		/* Error flags */
-	u16 resved2;
-	u16 vlan_tag;		/* VLAN TAG */
-};
-
-#define PACKET_FLAG_ETH_TYPE	0x0080
-#define PACKET_FLAG_VLAN_INS	0x0100
-#define PACKET_FLAG_ERR		0x0200
-#define PACKET_FLAG_IPV4	0x0400
-#define PACKET_FLAG_UDP		0x0800
-#define PACKET_FLAG_TCP		0x1000
-#define PACKET_FLAG_BCAST	0x2000
-#define PACKET_FLAG_MCAST	0x4000
-#define PACKET_FLAG_PAUSE	0x8000
-
-#define ERR_FLAG_CRC		0x0001
-#define ERR_FLAG_CODE		0x0002
-#define ERR_FLAG_DRIBBLE	0x0004
-#define ERR_FLAG_RUNT		0x0008
-#define ERR_FLAG_OV		0x0010
-#define ERR_FLAG_TRUNC		0x0020
-#define ERR_FLAG_IP_CHKSUM	0x0040
-#define ERR_FLAG_L4_CHKSUM	0x0080
-#define ERR_FLAG_LEN		0x0100
-#define ERR_FLAG_DES_ADDR	0x0200
-
-/* RFD descriptor */
-struct rx_free_desc {
-	__le64 buffer_addr;	/* Address of the descriptor's data buffer */
-	__le16 buf_len;		/* Size of the receive buffer in host memory, in bytes */
-	u16 coalese;		/* Update consumer index to host after the reception of this frame */
-	/* __attribute__ ((packed)) is required */
-} __attribute__ ((packed));
-
-/* tsopu defines */
-#define TSO_PARAM_BUFLEN_MASK           0x3FFF
-#define TSO_PARAM_BUFLEN_SHIFT          0
-#define TSO_PARAM_DMAINT_MASK           0x0001
-#define TSO_PARAM_DMAINT_SHIFT          14
-#define TSO_PARAM_PKTNT_MASK            0x0001
-#define TSO_PARAM_PKTINT_SHIFT          15
-#define TSO_PARAM_VLANTAG_MASK          0xFFFF
-#define TSO_PARAM_VLAN_SHIFT            16
-
-/* tsopl defines */
-#define TSO_PARAM_EOP_MASK              0x0001
-#define TSO_PARAM_EOP_SHIFT             0
-#define TSO_PARAM_COALESCE_MASK         0x0001
-#define TSO_PARAM_COALESCE_SHIFT        1
-#define TSO_PARAM_INSVLAG_MASK          0x0001
-#define TSO_PARAM_INSVLAG_SHIFT         2
-#define TSO_PARAM_CUSTOMCKSUM_MASK      0x0001
-#define TSO_PARAM_CUSTOMCKSUM_SHIFT     3
-#define TSO_PARAM_SEGMENT_MASK          0x0001
-#define TSO_PARAM_SEGMENT_SHIFT         4
-#define TSO_PARAM_IPCKSUM_MASK          0x0001
-#define TSO_PARAM_IPCKSUM_SHIFT         5
-#define TSO_PARAM_TCPCKSUM_MASK         0x0001
-#define TSO_PARAM_TCPCKSUM_SHIFT        6
-#define TSO_PARAM_UDPCKSUM_MASK         0x0001
-#define TSO_PARAM_UDPCKSUM_SHIFT        7
-#define TSO_PARAM_VLANTAGGED_MASK       0x0001
-#define TSO_PARAM_VLANTAGGED_SHIFT      8
-#define TSO_PARAM_ETHTYPE_MASK          0x0001
-#define TSO_PARAM_ETHTYPE_SHIFT         9
-#define TSO_PARAM_IPHL_MASK             0x000F
-#define TSO_PARAM_IPHL_SHIFT            10
-#define TSO_PARAM_TCPHDRLEN_MASK        0x000F
-#define TSO_PARAM_TCPHDRLEN_SHIFT       14
-#define TSO_PARAM_HDRFLAG_MASK          0x0001
-#define TSO_PARAM_HDRFLAG_SHIFT         18
-#define TSO_PARAM_MSS_MASK              0x1FFF
-#define TSO_PARAM_MSS_SHIFT             19
-
-/* csumpu defines */
-#define CSUM_PARAM_BUFLEN_MASK          0x3FFF
-#define CSUM_PARAM_BUFLEN_SHIFT         0
-#define CSUM_PARAM_DMAINT_MASK          0x0001
-#define CSUM_PARAM_DMAINT_SHIFT         14
-#define CSUM_PARAM_PKTINT_MASK          0x0001
-#define CSUM_PARAM_PKTINT_SHIFT         15
-#define CSUM_PARAM_VALANTAG_MASK        0xFFFF
-#define CSUM_PARAM_VALAN_SHIFT          16
-
-/* csumpl defines*/
-#define CSUM_PARAM_EOP_MASK             0x0001
-#define CSUM_PARAM_EOP_SHIFT            0
-#define CSUM_PARAM_COALESCE_MASK        0x0001
-#define CSUM_PARAM_COALESCE_SHIFT       1
-#define CSUM_PARAM_INSVLAG_MASK         0x0001
-#define CSUM_PARAM_INSVLAG_SHIFT        2
-#define CSUM_PARAM_CUSTOMCKSUM_MASK     0x0001
-#define CSUM_PARAM_CUSTOMCKSUM_SHIFT    3
-#define CSUM_PARAM_SEGMENT_MASK         0x0001
-#define CSUM_PARAM_SEGMENT_SHIFT        4
-#define CSUM_PARAM_IPCKSUM_MASK         0x0001
-#define CSUM_PARAM_IPCKSUM_SHIFT        5
-#define CSUM_PARAM_TCPCKSUM_MASK        0x0001
-#define CSUM_PARAM_TCPCKSUM_SHIFT       6
-#define CSUM_PARAM_UDPCKSUM_MASK        0x0001
-#define CSUM_PARAM_UDPCKSUM_SHIFT       7
-#define CSUM_PARAM_VLANTAGGED_MASK      0x0001
-#define CSUM_PARAM_VLANTAGGED_SHIFT     8
-#define CSUM_PARAM_ETHTYPE_MASK         0x0001
-#define CSUM_PARAM_ETHTYPE_SHIFT        9
-#define CSUM_PARAM_IPHL_MASK            0x000F
-#define CSUM_PARAM_IPHL_SHIFT           10
-#define CSUM_PARAM_PLOADOFFSET_MASK     0x00FF
-#define CSUM_PARAM_PLOADOFFSET_SHIFT    16
-#define CSUM_PARAM_XSUMOFFSET_MASK      0x00FF
-#define CSUM_PARAM_XSUMOFFSET_SHIFT     24
-
-/* TPD descriptor */
-struct tso_param {
-        /* The order of these declarations is important -- don't change it */
-        u32 tsopu;      /* tso_param upper word */
-        u32 tsopl;      /* tso_param lower word */
-};
-
-struct csum_param {
-        /* The order of these declarations is important -- don't change it */
-        u32 csumpu;     /* csum_param upper word */
-        u32 csumpl;     /* csum_param lower word */
-};
-
-union tpd_descr {
-	u64 data;
-	struct csum_param csum;
-	struct tso_param tso;
-};
-
-struct tx_packet_desc {
-	__le64 buffer_addr;
-	union tpd_descr desc;
-};
-
-/* DMA Order Settings */
-enum atl1_dma_order {
-	atl1_dma_ord_in = 1,
-	atl1_dma_ord_enh = 2,
-	atl1_dma_ord_out = 4
-};
-
-enum atl1_dma_rcb {
-	atl1_rcb_64 = 0,
-	atl1_rcb_128 = 1
-};
-
-enum atl1_dma_req_block {
-	atl1_dma_req_128 = 0,
-	atl1_dma_req_256 = 1,
-	atl1_dma_req_512 = 2,
-	atl1_dma_req_1024 = 3,
-	atl1_dma_req_2048 = 4,
-	atl1_dma_req_4096 = 5
-};
-
-struct atl1_spi_flash_dev {
-	const char *manu_name;	/* manufacturer id */
-	/* op-code */
-	u8 cmd_wrsr;
-	u8 cmd_read;
-	u8 cmd_program;
-	u8 cmd_wren;
-	u8 cmd_wrdi;
-	u8 cmd_rdsr;
-	u8 cmd_rdid;
-	u8 cmd_sector_erase;
-	u8 cmd_chip_erase;
-};
-
-#endif	/* _ATL1_HW_H_ */
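(A minimal illustration, not part of this patch, of how the mask/shift pairs in
the removed header are typically used: packing a hypothetical MSS value into a
descriptor's tsopl word.)

	u32 tsopl = 0;
	u32 mss = 1460;	/* hypothetical MSS value */

	tsopl |= (mss & TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT;
	tsopl |= 1 << TSO_PARAM_SEGMENT_SHIFT;	/* enable TCP segmentation */
	tsopl |= 1 << TSO_PARAM_IPCKSUM_SHIFT;	/* offload the IP checksum */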

+ 0 - 203
drivers/net/atl1/atl1_param.c

@@ -1,203 +0,0 @@
-/*
- * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
- * Copyright(c) 2006 Chris Snook <csnook@redhat.com>
- * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
- *
- * Derived from Intel e1000 driver
- * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- */
-
-#include <linux/types.h>
-#include <linux/moduleparam.h>
-#include <linux/pci.h>
-#include "atl1.h"
-
-/*
- * This is the only thing that needs to be changed to adjust the
- * maximum number of ports that the driver can manage.
- */
-#define ATL1_MAX_NIC 4
-
-#define OPTION_UNSET    -1
-#define OPTION_DISABLED 0
-#define OPTION_ENABLED  1
-
-#define ATL1_PARAM_INIT { [0 ... ATL1_MAX_NIC] = OPTION_UNSET }
-
-/*
- * Interrupt Moderate Timer in units of 2 us
- *
- * Valid Range: 10-65535
- *
- * Default Value: 100 (200us)
- */
-static int __devinitdata int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
-static int num_int_mod_timer = 0;
-module_param_array_named(int_mod_timer, int_mod_timer, int, &num_int_mod_timer, 0);
-MODULE_PARM_DESC(int_mod_timer, "Interrupt moderator timer");
-
-/*
- * flash_vendor
- *
- * Valid Range: 0-2
- *
- * 0 - Atmel
- * 1 - SST
- * 2 - ST
- *
- * Default Value: 0
- */
-static int __devinitdata flash_vendor[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
-static int num_flash_vendor = 0;
-module_param_array_named(flash_vendor, flash_vendor, int, &num_flash_vendor, 0);
-MODULE_PARM_DESC(flash_vendor, "SPI flash vendor");
-
-#define DEFAULT_INT_MOD_CNT	100	/* 200us */
-#define MAX_INT_MOD_CNT		65000
-#define MIN_INT_MOD_CNT		50
-
-#define FLASH_VENDOR_DEFAULT	0
-#define FLASH_VENDOR_MIN	0
-#define FLASH_VENDOR_MAX	2
-
-struct atl1_option {
-	enum { enable_option, range_option, list_option } type;
-	char *name;
-	char *err;
-	int def;
-	union {
-		struct {	/* range_option info */
-			int min;
-			int max;
-		} r;
-		struct {	/* list_option info */
-			int nr;
-			struct atl1_opt_list {
-				int i;
-				char *str;
-			} *p;
-		} l;
-	} arg;
-};
-
-static int __devinit atl1_validate_option(int *value, struct atl1_option *opt, struct pci_dev *pdev)
-{
-	if (*value == OPTION_UNSET) {
-		*value = opt->def;
-		return 0;
-	}
-
-	switch (opt->type) {
-	case enable_option:
-		switch (*value) {
-		case OPTION_ENABLED:
-			dev_info(&pdev->dev, "%s enabled\n", opt->name);
-			return 0;
-		case OPTION_DISABLED:
-			dev_info(&pdev->dev, "%s disabled\n", opt->name);
-			return 0;
-		}
-		break;
-	case range_option:
-		if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
-			dev_info(&pdev->dev, "%s set to %i\n", opt->name,
-				*value);
-			return 0;
-		}
-		break;
-	case list_option:{
-			int i;
-			struct atl1_opt_list *ent;
-
-			for (i = 0; i < opt->arg.l.nr; i++) {
-				ent = &opt->arg.l.p[i];
-				if (*value == ent->i) {
-					if (ent->str[0] != '\0')
-						dev_info(&pdev->dev, "%s\n",
-							ent->str);
-					return 0;
-				}
-			}
-		}
-		break;
-
-	default:
-		break;
-	}
-
-	dev_info(&pdev->dev, "invalid %s specified (%i) %s\n",
-		opt->name, *value, opt->err);
-	*value = opt->def;
-	return -1;
-}
-
-/*
- * atl1_check_options - Range Checking for Command Line Parameters
- * @adapter: board private structure
- *
- * This routine checks all command line parameters for valid user
- * input.  If an invalid value is given, or if no user specified
- * value exists, a default value is used.  The final value is stored
- * in a variable in the adapter structure.
- */
-void __devinit atl1_check_options(struct atl1_adapter *adapter)
-{
-	struct pci_dev *pdev = adapter->pdev;
-	int bd = adapter->bd_number;
-	if (bd >= ATL1_MAX_NIC) {
-		dev_notice(&pdev->dev, "no configuration for board#%i\n", bd);
-		dev_notice(&pdev->dev, "using defaults for all values\n");
-	}
-	{			/* Interrupt Moderate Timer */
-		struct atl1_option opt = {
-			.type = range_option,
-			.name = "Interrupt Moderator Timer",
-			.err = "using default of "
-				__MODULE_STRING(DEFAULT_INT_MOD_CNT),
-			.def = DEFAULT_INT_MOD_CNT,
-			.arg = {.r =
-				{.min = MIN_INT_MOD_CNT,.max = MAX_INT_MOD_CNT}}
-		};
-		int val;
-		if (num_int_mod_timer > bd) {
-			val = int_mod_timer[bd];
-			atl1_validate_option(&val, &opt, pdev);
-			adapter->imt = (u16) val;
-		} else
-			adapter->imt = (u16) (opt.def);
-	}
-
-	{			/* Flash Vendor */
-		struct atl1_option opt = {
-			.type = range_option,
-			.name = "SPI Flash Vendor",
-			.err = "using default of "
-				__MODULE_STRING(FLASH_VENDOR_DEFAULT),
-			.def = FLASH_VENDOR_DEFAULT,
-			.arg = {.r =
-				{.min = FLASH_VENDOR_MIN,.max =
-				 FLASH_VENDOR_MAX}}
-		};
-		int val;
-		if (num_flash_vendor > bd) {
-			val = flash_vendor[bd];
-			atl1_validate_option(&val, &opt, pdev);
-			adapter->hw.flash_vendor = (u8) val;
-		} else
-			adapter->hw.flash_vendor = (u8) (opt.def);
-	}
-}
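(A hedged usage example, not part of this patch: int_mod_timer is given in
2 us units, so the default of 100 yields a 200 us interrupt moderation
interval.  The per-board values below are hypothetical.)

	/* modprobe atl1 int_mod_timer=100,60
	 *   board 0: adapter->imt = 100 (200 us)
	 *   board 1: adapter->imt = 60  (120 us)
	 * Both lie inside [MIN_INT_MOD_CNT, MAX_INT_MOD_CNT], so
	 * atl1_validate_option() accepts them instead of falling back
	 * to DEFAULT_INT_MOD_CNT.
	 */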

+ 1 - 0
drivers/net/atlx/Makefile

@@ -0,0 +1 @@
+obj-$(CONFIG_ATL1)	+= atl1.o

+ 1636 - 522
drivers/net/atl1/atl1_main.c → drivers/net/atlx/atl1.c

@@ -1,6 +1,6 @@
 /*
  * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
- * Copyright(c) 2006 Chris Snook <csnook@redhat.com>
+ * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
  * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
  *
  * Derived from Intel e1000 driver
@@ -36,7 +36,6 @@
  * A very incomplete list of things that need to be dealt with:
  *
  * TODO:
- * Fix TSO; tx performance is horrible with TSO enabled.
  * Wake on LAN.
  * Add more ethtool functions.
  * Fix abstruse irq enable/disable condition described here:
@@ -50,62 +49,768 @@
  * SMP torture testing
  */
 
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/pci.h>
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/skbuff.h>
+#include <asm/atomic.h>
+#include <asm/byteorder.h>
+
+#include <linux/compiler.h>
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
 #include <linux/etherdevice.h>
-#include <linux/if_vlan.h>
-#include <linux/if_ether.h>
-#include <linux/irqreturn.h>
-#include <linux/workqueue.h>
-#include <linux/timer.h>
-#include <linux/jiffies.h>
 #include <linux/hardirq.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
 #include <linux/interrupt.h>
+#include <linux/ip.h>
 #include <linux/irqflags.h>
-#include <linux/dma-mapping.h>
+#include <linux/irqreturn.h>
+#include <linux/jiffies.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
 #include <linux/pm.h>
-#include <linux/in.h>
-#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
 #include <linux/tcp.h>
-#include <linux/compiler.h>
-#include <linux/delay.h>
-#include <linux/mii.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
 #include <net/checksum.h>
 
-#include <asm/atomic.h>
-#include <asm/byteorder.h>
+#include "atl1.h"
+
+/* Temporary hack for merging atl1 and atl2 */
+#include "atlx.c"
+
+/*
+ * atl1_pci_tbl - PCI Device ID Table
+ */
+static const struct pci_device_id atl1_pci_tbl[] = {
+	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)},
+	/* required last entry */
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, atl1_pci_tbl);
+
+static const u32 atl1_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
+	NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
+
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Message level (0=none,...,16=all)");
+
+/*
+ * Reset the transmit and receive units; mask and clear all interrupts.
+ * hw - Struct containing variables accessed by shared code
+ * return: 0 on success, or the nonzero idle status on error
+ */
+static s32 atl1_reset_hw(struct atl1_hw *hw)
+{
+	struct pci_dev *pdev = hw->back->pdev;
+	struct atl1_adapter *adapter = hw->back;
+	u32 icr;
+	int i;
+
+	/*
+	 * Clear Interrupt mask to stop board from generating
+	 * interrupts & Clear any pending interrupt events
+	 */
+	/*
+	 * iowrite32(0, hw->hw_addr + REG_IMR);
+	 * iowrite32(0xffffffff, hw->hw_addr + REG_ISR);
+	 */
+
+	/*
+	 * Issue Soft Reset to the MAC.  This will reset the chip's
+	 * transmit, receive, and DMA engines.  It will not affect
+	 * the current PCI configuration.  The global reset bit is self-
+	 * clearing, and should clear within a microsecond.
+	 */
+	iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL);
+	ioread32(hw->hw_addr + REG_MASTER_CTRL);
+
+	iowrite16(1, hw->hw_addr + REG_PHY_ENABLE);
+	ioread16(hw->hw_addr + REG_PHY_ENABLE);
+
+	/* delay about 1ms */
+	msleep(1);
+
+	/* Wait at least 10 ms for all modules to be idle */
+	for (i = 0; i < 10; i++) {
+		icr = ioread32(hw->hw_addr + REG_IDLE_STATUS);
+		if (!icr)
+			break;
+		/* delay 1 ms */
+		msleep(1);
+		/* FIXME: still the right way to do this? */
+		cpu_relax();
+	}
+
+	if (icr) {
+		if (netif_msg_hw(adapter))
+			dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr);
+		return icr;
+	}
+
+	return 0;
+}
+
+/* EEPROM helpers
+ *
+ * atl1_check_eeprom_exist
+ * returns 0 if an EEPROM is present
+ */
+static int atl1_check_eeprom_exist(struct atl1_hw *hw)
+{
+	u32 value;
+	value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
+	if (value & SPI_FLASH_CTRL_EN_VPD) {
+		value &= ~SPI_FLASH_CTRL_EN_VPD;
+		iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
+	}
+
+	value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST);
+	return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
+}
+
+static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
+{
+	int i;
+	u32 control;
+
+	if (offset & 3)
+		/* address is not dword-aligned */
+		return false;
+
+	iowrite32(0, hw->hw_addr + REG_VPD_DATA);
+	control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
+	iowrite32(control, hw->hw_addr + REG_VPD_CAP);
+	ioread32(hw->hw_addr + REG_VPD_CAP);
+
+	for (i = 0; i < 10; i++) {
+		msleep(2);
+		control = ioread32(hw->hw_addr + REG_VPD_CAP);
+		if (control & VPD_CAP_VPD_FLAG)
+			break;
+	}
+	if (control & VPD_CAP_VPD_FLAG) {
+		*p_value = ioread32(hw->hw_addr + REG_VPD_DATA);
+		return true;
+	}
+	/* timeout */
+	return false;
+}
+
+/*
+ * Reads the value from a PHY register
+ * hw - Struct containing variables accessed by shared code
+ * reg_addr - address of the PHY register to read
+ */
+s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
+{
+	u32 val;
+	int i;
+
+	val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
+		MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 <<
+		MDIO_CLK_SEL_SHIFT;
+	iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
+	ioread32(hw->hw_addr + REG_MDIO_CTRL);
+
+	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
+		udelay(2);
+		val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
+		if (!(val & (MDIO_START | MDIO_BUSY)))
+			break;
+	}
+	if (!(val & (MDIO_START | MDIO_BUSY))) {
+		*phy_data = (u16) val;
+		return 0;
+	}
+	return ATLX_ERR_PHY;
+}
+
+#define CUSTOM_SPI_CS_SETUP	2
+#define CUSTOM_SPI_CLK_HI	2
+#define CUSTOM_SPI_CLK_LO	2
+#define CUSTOM_SPI_CS_HOLD	2
+#define CUSTOM_SPI_CS_HI	3
+
+static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf)
+{
+	int i;
+	u32 value;
+
+	iowrite32(0, hw->hw_addr + REG_SPI_DATA);
+	iowrite32(addr, hw->hw_addr + REG_SPI_ADDR);
+
+	value = SPI_FLASH_CTRL_WAIT_READY |
+	    (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
+	    SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI &
+					     SPI_FLASH_CTRL_CLK_HI_MASK) <<
+	    SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO &
+					   SPI_FLASH_CTRL_CLK_LO_MASK) <<
+	    SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD &
+					   SPI_FLASH_CTRL_CS_HOLD_MASK) <<
+	    SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI &
+					    SPI_FLASH_CTRL_CS_HI_MASK) <<
+	    SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) <<
+	    SPI_FLASH_CTRL_INS_SHIFT;
+
+	iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
+
+	value |= SPI_FLASH_CTRL_START;
+	iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
+	ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
+
+	for (i = 0; i < 10; i++) {
+		msleep(1);
+		value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
+		if (!(value & SPI_FLASH_CTRL_START))
+			break;
+	}
+
+	if (value & SPI_FLASH_CTRL_START)
+		return false;
+
+	*buf = ioread32(hw->hw_addr + REG_SPI_DATA);
+
+	return true;
+}
+
+/*
+ * atl1_get_permanent_address
+ * returns 0 if a valid MAC address was found
+ */
+static int atl1_get_permanent_address(struct atl1_hw *hw)
+{
+	u32 addr[2];
+	u32 i, control;
+	u16 reg;
+	u8 eth_addr[ETH_ALEN];
+	bool key_valid;
+
+	if (is_valid_ether_addr(hw->perm_mac_addr))
+		return 0;
+
+	/* init */
+	addr[0] = addr[1] = 0;
+
+	if (!atl1_check_eeprom_exist(hw)) {
+		reg = 0;
+		key_valid = false;
+		/* Read out all EEPROM content */
+		i = 0;
+		while (1) {
+			if (atl1_read_eeprom(hw, i + 0x100, &control)) {
+				if (key_valid) {
+					if (reg == REG_MAC_STA_ADDR)
+						addr[0] = control;
+					else if (reg == (REG_MAC_STA_ADDR + 4))
+						addr[1] = control;
+					key_valid = false;
+				} else if ((control & 0xff) == 0x5A) {
+					key_valid = true;
+					reg = (u16) (control >> 16);
+				} else
+					break;
+			} else
+				/* read error */
+				break;
+			i += 4;
+		}
+
+		*(u32 *) &eth_addr[2] = swab32(addr[0]);
+		*(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
+		if (is_valid_ether_addr(eth_addr)) {
+			memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
+			return 0;
+		}
+		return 1;
+	}
+
+	/* no EEPROM found; see if the address is stored in SPI flash */
+	addr[0] = addr[1] = 0;
+	reg = 0;
+	key_valid = false;
+	i = 0;
+	while (1) {
+		if (atl1_spi_read(hw, i + 0x1f000, &control)) {
+			if (key_valid) {
+				if (reg == REG_MAC_STA_ADDR)
+					addr[0] = control;
+				else if (reg == (REG_MAC_STA_ADDR + 4))
+					addr[1] = control;
+				key_valid = false;
+			} else if ((control & 0xff) == 0x5A) {
+				key_valid = true;
+				reg = (u16) (control >> 16);
+			} else
+				/* data end */
+				break;
+		} else
+			/* read error */
+			break;
+		i += 4;
+	}
+
+	*(u32 *) &eth_addr[2] = swab32(addr[0]);
+	*(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
+	if (is_valid_ether_addr(eth_addr)) {
+		memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
+		return 0;
+	}
+
+	/*
+	 * On some motherboards, the MAC address is written by the
+	 * BIOS directly to the MAC register during POST, and is
+	 * not stored in eeprom.  If all else thus far has failed
+	 * to fetch the permanent MAC address, try reading it directly.
+	 */
+	addr[0] = ioread32(hw->hw_addr + REG_MAC_STA_ADDR);
+	addr[1] = ioread16(hw->hw_addr + (REG_MAC_STA_ADDR + 4));
+	*(u32 *) &eth_addr[2] = swab32(addr[0]);
+	*(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
+	if (is_valid_ether_addr(eth_addr)) {
+		memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
+		return 0;
+	}
+
+	return 1;
+}
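/*
 * Hedged illustration, not part of this patch: the loops above walk the
 * EEPROM (or SPI flash) as a stream of 32-bit words arranged in key/value
 * pairs.  A key word carries the tag 0x5A in its low byte and a register
 * offset in its upper 16 bits; the next word is the value for that register:
 *
 *   key word:   (REG_MAC_STA_ADDR << 16) | 0x5A
 *   value word: low four bytes of the station address
 *   key word:   ((REG_MAC_STA_ADDR + 4) << 16) | 0x5A
 *   value word: upper two bytes of the station address
 *
 * Any word that is neither a pending value nor a 0x5A-tagged key ends
 * the scan.
 */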
+
+/*
+ * Reads the adapter's MAC address from the EEPROM
+ * hw - Struct containing variables accessed by shared code
+ */
+s32 atl1_read_mac_addr(struct atl1_hw *hw)
+{
+	u16 i;
+
+	if (atl1_get_permanent_address(hw))
+		random_ether_addr(hw->perm_mac_addr);
+
+	for (i = 0; i < ETH_ALEN; i++)
+		hw->mac_addr[i] = hw->perm_mac_addr[i];
+	return 0;
+}
+
+/*
+ * Hashes an address to determine its location in the multicast table
+ * hw - Struct containing variables accessed by shared code
+ * mc_addr - the multicast address to hash
+ *
+ * atl1_hash_mc_addr
+ *  purpose
+ *      set hash value for a multicast address
+ *      hash calculation:
+ *          1. calculate a 32-bit CRC of the multicast address
+ *          2. reverse the CRC bit order (MSB to LSB)
+ */
+u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
+{
+	u32 crc32, value = 0;
+	int i;
+
+	crc32 = ether_crc_le(6, mc_addr);
+	for (i = 0; i < 32; i++)
+		value |= (((crc32 >> i) & 1) << (31 - i));
+
+	return value;
+}
+
+/*
+ * Sets the bit in the multicast table corresponding to the hash value.
+ * hw - Struct containing variables accessed by shared code
+ * hash_value - Multicast address hash value
+ */
+void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
+{
+	u32 hash_bit, hash_reg;
+	u32 mta;
+
+	/*
+	 * The hash table is a register array of two 32-bit registers.
+	 * It is treated like an array of 64 bits.  We want to set
+	 * bit BitArray[hash_value]. So we figure out what register
+	 * the bit is in, read it, OR in the new bit, then write
+	 * back the new value.  The register is selected by the most
+	 * significant bit of the hash value, and the bit within that
+	 * register by the next five bits (bits 30:26).
+	 */
+	hash_reg = (hash_value >> 31) & 0x1;
+	hash_bit = (hash_value >> 26) & 0x1F;
+	mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
+	mta |= (1 << hash_bit);
+	iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
+}
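/*
 * Hedged sketch, not part of this patch: how the two helpers above combine
 * for a single multicast address; mc_addr is a hypothetical 6-byte address.
 */
	u32 hash = atl1_hash_mc_addr(hw, mc_addr);	/* bit-reversed CRC32 of mc_addr */
	atl1_hash_set(hw, hash);			/* sets one of the 64 table bits */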
+
+/*
+ * Writes a value to a PHY register
+ * hw - Struct containing variables accessed by shared code
+ * reg_addr - address of the PHY register to write
+ * data - data to write to the PHY
+ */
+static s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data)
+{
+	int i;
+	u32 val;
+
+	val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
+	    (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
+	    MDIO_SUP_PREAMBLE |
+	    MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
+	iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
+	ioread32(hw->hw_addr + REG_MDIO_CTRL);
+
+	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
+		udelay(2);
+		val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
+		if (!(val & (MDIO_START | MDIO_BUSY)))
+			break;
+	}
+
+	if (!(val & (MDIO_START | MDIO_BUSY)))
+		return 0;
+
+	return ATLX_ERR_PHY;
+}
+
+/*
+ * Take the L001 PHY out of its power-saving state (hardware bug workaround)
+ * hw - Struct containing variables accessed by shared code
+ * At power-on, the L001 PHY is always in the power-saving state
+ * (gigabit link forbidden)
+ */
+static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw)
+{
+	s32 ret;
+	ret = atl1_write_phy_reg(hw, 29, 0x0029);
+	if (ret)
+		return ret;
+	return atl1_write_phy_reg(hw, 30, 0);
+}
+
+/*
+ *TODO: do something or get rid of this
+ */
+#ifdef CONFIG_PM
+static s32 atl1_phy_enter_power_saving(struct atl1_hw *hw)
+{
+/*    s32 ret_val;
+ *    u16 phy_data;
+ */
+
+/*
+    ret_val = atl1_write_phy_reg(hw, ...);
+    ret_val = atl1_write_phy_reg(hw, ...);
+    ....
+*/
+	return 0;
+}
+#endif
+
+/*
+ * Resets the PHY and makes the configuration take effect
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Sets bits 15 and 12 of the MII Control register (for the F001 bug)
+ */
+static s32 atl1_phy_reset(struct atl1_hw *hw)
+{
+	struct pci_dev *pdev = hw->back->pdev;
+	struct atl1_adapter *adapter = hw->back;
+	s32 ret_val;
+	u16 phy_data;
+
+	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
+	    hw->media_type == MEDIA_TYPE_1000M_FULL)
+		phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
+	else {
+		switch (hw->media_type) {
+		case MEDIA_TYPE_100M_FULL:
+			phy_data =
+			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
+			    MII_CR_RESET;
+			break;
+		case MEDIA_TYPE_100M_HALF:
+			phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
+			break;
+		case MEDIA_TYPE_10M_FULL:
+			phy_data =
+			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
+			break;
+		default:
+			/* MEDIA_TYPE_10M_HALF: */
+			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
+			break;
+		}
+	}
+
+	ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data);
+	if (ret_val) {
+		u32 val;
+		int i;
+		/* pcie serdes link may be down! */
+		if (netif_msg_hw(adapter))
+			dev_dbg(&pdev->dev, "pcie phy link down\n");
+
+		for (i = 0; i < 25; i++) {
+			msleep(1);
+			val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
+			if (!(val & (MDIO_START | MDIO_BUSY)))
+				break;
+		}
+
+		if ((val & (MDIO_START | MDIO_BUSY)) != 0) {
+			if (netif_msg_hw(adapter))
+				dev_warn(&pdev->dev,
+					"pcie link down at least 25ms\n");
+			return ret_val;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Configures PHY autoneg and flow control advertisement settings
+ * hw - Struct containing variables accessed by shared code
+ */
+static s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw)
+{
+	s32 ret_val;
+	s16 mii_autoneg_adv_reg;
+	s16 mii_1000t_ctrl_reg;
+
+	/* Read the MII Auto-Neg Advertisement Register (Address 4). */
+	mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
+
+	/* Read the MII 1000Base-T Control Register (Address 9). */
+	mii_1000t_ctrl_reg = MII_ATLX_CR_1000T_DEFAULT_CAP_MASK;
+
+	/*
+	 * First we clear all the 10/100 mb speed bits in the Auto-Neg
+	 * Advertisement Register (Address 4) and the 1000 mb speed bits in
+	 * the  1000Base-T Control Register (Address 9).
+	 */
+	mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
+	mii_1000t_ctrl_reg &= ~MII_ATLX_CR_1000T_SPEED_MASK;
+
+	/*
+	 * Need to parse media_type  and set up
+	 * the appropriate PHY registers.
+	 */
+	switch (hw->media_type) {
+	case MEDIA_TYPE_AUTO_SENSOR:
+		mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
+					MII_AR_10T_FD_CAPS |
+					MII_AR_100TX_HD_CAPS |
+					MII_AR_100TX_FD_CAPS);
+		mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
+		break;
+
+	case MEDIA_TYPE_1000M_FULL:
+		mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
+		break;
+
+	case MEDIA_TYPE_100M_FULL:
+		mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
+		break;
+
+	case MEDIA_TYPE_100M_HALF:
+		mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
+		break;
+
+	case MEDIA_TYPE_10M_FULL:
+		mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
+		break;
+
+	default:
+		mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
+		break;
+	}
+
+	/* flow control fixed to enable all */
+	mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
+
+	hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
+	hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
+
+	ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = atl1_write_phy_reg(hw, MII_ATLX_CR, mii_1000t_ctrl_reg);
+	if (ret_val)
+		return ret_val;
+
+	return 0;
+}
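/*
 * Hedged illustration, not part of this patch: for MEDIA_TYPE_AUTO_SENSOR
 * the two advertisement words written above work out to
 *
 *   adv    = (MII_AR_DEFAULT_CAP_MASK & ~MII_AR_SPEED_MASK)
 *            | MII_AR_10T_HD_CAPS | MII_AR_10T_FD_CAPS
 *            | MII_AR_100TX_HD_CAPS | MII_AR_100TX_FD_CAPS
 *            | MII_AR_ASM_DIR | MII_AR_PAUSE
 *   1000cr = (MII_ATLX_CR_1000T_DEFAULT_CAP_MASK
 *            & ~MII_ATLX_CR_1000T_SPEED_MASK)
 *            | MII_ATLX_CR_1000T_FD_CAPS
 *
 * i.e. all 10/100 speeds, 1000BASE-T full duplex, and both pause bits.
 */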
+
+/*
+ * Configures link settings.
+ * hw - Struct containing variables accessed by shared code
+ * Assumes the hardware has previously been reset and the
+ * transmitter and receiver are not enabled.
+ */
+static s32 atl1_setup_link(struct atl1_hw *hw)
+{
+	struct pci_dev *pdev = hw->back->pdev;
+	struct atl1_adapter *adapter = hw->back;
+	s32 ret_val;
+
+	/*
+	 * Options:
+	 *  PHY will advertise value(s) parsed from
+	 *  autoneg_advertised and fc
+	 *  regardless of the autoneg setting, we do not wait for the link result.
+	 */
+	ret_val = atl1_phy_setup_autoneg_adv(hw);
+	if (ret_val) {
+		if (netif_msg_link(adapter))
+			dev_dbg(&pdev->dev,
+				"error setting up autonegotiation\n");
+		return ret_val;
+	}
+	/* software reset; enable autoneg if needed */
+	ret_val = atl1_phy_reset(hw);
+	if (ret_val) {
+		if (netif_msg_link(adapter))
+			dev_dbg(&pdev->dev, "error resetting phy\n");
+		return ret_val;
+	}
+	hw->phy_configured = true;
+	return ret_val;
+}
 
-#include "atl1.h"
+static void atl1_init_flash_opcode(struct atl1_hw *hw)
+{
+	if (hw->flash_vendor >= ARRAY_SIZE(flash_table))
+		/* Atmel */
+		hw->flash_vendor = 0;
+
+	/* Init OP table */
+	iowrite8(flash_table[hw->flash_vendor].cmd_program,
+		hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM);
+	iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase,
+		hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE);
+	iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase,
+		hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE);
+	iowrite8(flash_table[hw->flash_vendor].cmd_rdid,
+		hw->hw_addr + REG_SPI_FLASH_OP_RDID);
+	iowrite8(flash_table[hw->flash_vendor].cmd_wren,
+		hw->hw_addr + REG_SPI_FLASH_OP_WREN);
+	iowrite8(flash_table[hw->flash_vendor].cmd_rdsr,
+		hw->hw_addr + REG_SPI_FLASH_OP_RDSR);
+	iowrite8(flash_table[hw->flash_vendor].cmd_wrsr,
+		hw->hw_addr + REG_SPI_FLASH_OP_WRSR);
+	iowrite8(flash_table[hw->flash_vendor].cmd_read,
+		hw->hw_addr + REG_SPI_FLASH_OP_READ);
+}
 
-#define DRIVER_VERSION "2.0.7"
+/*
+ * Performs basic configuration of the adapter.
+ * hw - Struct containing variables accessed by shared code
+ * Assumes that the controller has previously been reset and is in a
+ * post-reset uninitialized state.  Initializes the multicast table
+ * and calls routines to set up the link.
+ * Leaves the transmit and receive units disabled and uninitialized.
+ */
+static s32 atl1_init_hw(struct atl1_hw *hw)
+{
+	u32 ret_val = 0;
 
-char atl1_driver_name[] = "atl1";
-static const char atl1_driver_string[] = "Attansic L1 Ethernet Network Driver";
-static const char atl1_copyright[] = "Copyright(c) 2005-2006 Attansic Corporation.";
-char atl1_driver_version[] = DRIVER_VERSION;
+	/* Zero out the Multicast HASH table */
+	iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
+	/* clear the old settings from the multicast hash table */
+	iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
 
-MODULE_AUTHOR
-    ("Attansic Corporation <xiong_huang@attansic.com>, Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
-MODULE_DESCRIPTION("Attansic 1000M Ethernet Network Driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRIVER_VERSION);
+	atl1_init_flash_opcode(hw);
+
+	if (!hw->phy_configured) {
+		/* enable GPHY LinkChange Interrupt */
+		ret_val = atl1_write_phy_reg(hw, 18, 0xC00);
+		if (ret_val)
+			return ret_val;
+		/* make PHY out of power-saving state */
+		ret_val = atl1_phy_leave_power_saving(hw);
+		if (ret_val)
+			return ret_val;
+		/* Call a subroutine to configure the link */
+		ret_val = atl1_setup_link(hw);
+	}
+	return ret_val;
+}
 
 /*
- * atl1_pci_tbl - PCI Device ID Table
+ * Detects the current speed and duplex settings of the hardware.
+ * hw - Struct containing variables accessed by shared code
+ * speed - Speed of the connection
+ * duplex - Duplex setting of the connection
  */
-static const struct pci_device_id atl1_pci_tbl[] = {
-	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)},
-	/* required last entry */
-	{0,}
-};
+static s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex)
+{
+	struct pci_dev *pdev = hw->back->pdev;
+	struct atl1_adapter *adapter = hw->back;
+	s32 ret_val;
+	u16 phy_data;
 
-MODULE_DEVICE_TABLE(pci, atl1_pci_tbl);
+	/* Read the PHY Specific Status Register (register 17) */
+	ret_val = atl1_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED))
+		return ATLX_ERR_PHY_RES;
+
+	switch (phy_data & MII_ATLX_PSSR_SPEED) {
+	case MII_ATLX_PSSR_1000MBS:
+		*speed = SPEED_1000;
+		break;
+	case MII_ATLX_PSSR_100MBS:
+		*speed = SPEED_100;
+		break;
+	case MII_ATLX_PSSR_10MBS:
+		*speed = SPEED_10;
+		break;
+	default:
+		if (netif_msg_hw(adapter))
+			dev_dbg(&pdev->dev, "error getting speed\n");
+		return ATLX_ERR_PHY_SPEED;
+		break;
+	}
+	if (phy_data & MII_ATLX_PSSR_DPLX)
+		*duplex = FULL_DUPLEX;
+	else
+		*duplex = HALF_DUPLEX;
+
+	return 0;
+}
+
+void atl1_set_mac_addr(struct atl1_hw *hw)
+{
+	u32 value;
+	/*
+	/*
+	 * Example: station address 00-0B-6A-F6-00-DC is programmed as
+	 * low dword 0x6AF600DC (bytes 2..5) and high dword 0x0000000B
+	 * (bytes 0..1).  Low dword first:
+	 */
+	value = (((u32) hw->mac_addr[2]) << 24) |
+	    (((u32) hw->mac_addr[3]) << 16) |
+	    (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5]));
+	iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
+	/* high dword */
+	value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
+	iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2));
+}
 
 /*
  * atl1_sw_init - Initialize general software structures (struct atl1_adapter)
@@ -125,7 +830,7 @@ static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
 
 	adapter->wol = 0;
 	adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
-	adapter->ict = 50000;	/* 100ms */
+	adapter->ict = 50000;		/* 100ms */
 	adapter->link_speed = SPEED_0;	/* hardware init */
 	adapter->link_duplex = FULL_DUPLEX;
 
@@ -205,31 +910,13 @@ static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 	return retval;
 }
 
-/*
- * atl1_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
-static int atl1_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
-	switch (cmd) {
-	case SIOCGMIIPHY:
-	case SIOCGMIIREG:
-	case SIOCSMIIREG:
-		return atl1_mii_ioctl(netdev, ifr, cmd);
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
 /*
  * atl1_setup_mem_resources - allocate Tx / RX descriptor resources
  * @adapter: board private structure
  *
  * Return 0 on success, negative on failure
  */
-s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
+static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
 {
 	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
 	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
@@ -242,13 +929,16 @@ s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
 	size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count);
 	tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
 	if (unlikely(!tpd_ring->buffer_info)) {
-		dev_err(&pdev->dev, "kzalloc failed , size = D%d\n", size);
+		if (netif_msg_drv(adapter))
+			dev_err(&pdev->dev, "kzalloc failed , size = D%d\n",
+				size);
 		goto err_nomem;
 	}
 	rfd_ring->buffer_info =
 		(struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count);
 
-	/* real ring DMA buffer
+	/*
+	 * real ring DMA buffer
 	 * each ring/block may need up to 8 bytes for alignment, hence the
 	 * additional 40 bytes tacked onto the end.
 	 */
@@ -263,7 +953,8 @@ s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
 	ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
 		&ring_header->dma);
 	if (unlikely(!ring_header->desc)) {
-		dev_err(&pdev->dev, "pci_alloc_consistent failed\n");
+		if (netif_msg_drv(adapter))
+			dev_err(&pdev->dev, "pci_alloc_consistent failed\n");
 		goto err_nomem;
 	}
 
@@ -307,7 +998,7 @@ s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
 		((u8 *) adapter->cmb.cmb +
 		(sizeof(struct coals_msg_block) + offset));
 
-	return ATL1_SUCCESS;
+	return 0;
 
 err_nomem:
 	kfree(tpd_ring->buffer_info);
@@ -416,7 +1107,7 @@ static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
  *
  * Free all transmit software resources
  */
-void atl1_free_ring_resources(struct atl1_adapter *adapter)
+static void atl1_free_ring_resources(struct atl1_adapter *adapter)
 {
 	struct pci_dev *pdev = adapter->pdev;
 	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
@@ -481,31 +1172,6 @@ static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
 	iowrite32(value, hw->hw_addr + REG_MAC_CTRL);
 }
 
-/*
- * atl1_set_mac - Change the Ethernet Address of the NIC
- * @netdev: network interface device structure
- * @p: pointer to an address structure
- *
- * Returns 0 on success, negative on failure
- */
-static int atl1_set_mac(struct net_device *netdev, void *p)
-{
-	struct atl1_adapter *adapter = netdev_priv(netdev);
-	struct sockaddr *addr = p;
-
-	if (netif_running(netdev))
-		return -EBUSY;
-
-	if (!is_valid_ether_addr(addr->sa_data))
-		return -EADDRNOTAVAIL;
-
-	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-	memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
-
-	atl1_set_mac_addr(&adapter->hw);
-	return 0;
-}
-
 static u32 atl1_check_link(struct atl1_adapter *adapter)
 {
 	struct atl1_hw *hw = &adapter->hw;
@@ -517,14 +1183,17 @@ static u32 atl1_check_link(struct atl1_adapter *adapter)
 	/* MII_BMSR must read twice */
 	atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
 	atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
-	if (!(phy_data & BMSR_LSTATUS)) {	/* link down */
-		if (netif_carrier_ok(netdev)) {	/* old link state: Up */
-			dev_info(&adapter->pdev->dev, "link is down\n");
+	if (!(phy_data & BMSR_LSTATUS)) {
+		/* link down */
+		if (netif_carrier_ok(netdev)) {
+			/* old link state: Up */
+			if (netif_msg_link(adapter))
+				dev_info(&adapter->pdev->dev, "link is down\n");
 			adapter->link_speed = SPEED_0;
 			netif_carrier_off(netdev);
 			netif_stop_queue(netdev);
 		}
-		return ATL1_SUCCESS;
+		return 0;
 	}
 
 	/* Link Up */
@@ -562,20 +1231,22 @@ static u32 atl1_check_link(struct atl1_adapter *adapter)
 			adapter->link_speed = speed;
 			adapter->link_duplex = duplex;
 			atl1_setup_mac_ctrl(adapter);
-			dev_info(&adapter->pdev->dev,
-				"%s link is up %d Mbps %s\n",
-				netdev->name, adapter->link_speed,
-				adapter->link_duplex == FULL_DUPLEX ?
-				"full duplex" : "half duplex");
+			if (netif_msg_link(adapter))
+				dev_info(&adapter->pdev->dev,
+					"%s link is up %d Mbps %s\n",
+					netdev->name, adapter->link_speed,
+					adapter->link_duplex == FULL_DUPLEX ?
+					"full duplex" : "half duplex");
 		}
-		if (!netif_carrier_ok(netdev)) {	/* Link down -> Up */
+		if (!netif_carrier_ok(netdev)) {
+			/* Link down -> Up */
 			netif_carrier_on(netdev);
 			netif_wake_queue(netdev);
 		}
-		return ATL1_SUCCESS;
+		return 0;
 	}
 
-	/* change orignal link status */
+	/* change original link status */
 	if (netif_carrier_ok(netdev)) {
 		adapter->link_speed = SPEED_0;
 		netif_carrier_off(netdev);
@@ -596,12 +1267,13 @@ static u32 atl1_check_link(struct atl1_adapter *adapter)
 			phy_data =
 			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
 			break;
-		default:	/* MEDIA_TYPE_10M_HALF: */
+		default:
+			/* MEDIA_TYPE_10M_HALF: */
 			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
 			break;
 		}
 		atl1_write_phy_reg(hw, MII_BMCR, phy_data);
-		return ATL1_SUCCESS;
+		return 0;
 	}
 
 	/* auto-neg, insert timer to re-config phy */
@@ -610,103 +1282,6 @@ static u32 atl1_check_link(struct atl1_adapter *adapter)
 		mod_timer(&adapter->phy_config_timer, jiffies + 3 * HZ);
 	}
 
-	return ATL1_SUCCESS;
-}
-
-static void atl1_check_for_link(struct atl1_adapter *adapter)
-{
-	struct net_device *netdev = adapter->netdev;
-	u16 phy_data = 0;
-
-	spin_lock(&adapter->lock);
-	adapter->phy_timer_pending = false;
-	atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
-	atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
-	spin_unlock(&adapter->lock);
-
-	/* notify upper layer link down ASAP */
-	if (!(phy_data & BMSR_LSTATUS)) {	/* Link Down */
-		if (netif_carrier_ok(netdev)) {	/* old link state: Up */
-			dev_info(&adapter->pdev->dev, "%s link is down\n",
-				netdev->name);
-			adapter->link_speed = SPEED_0;
-			netif_carrier_off(netdev);
-			netif_stop_queue(netdev);
-		}
-	}
-	schedule_work(&adapter->link_chg_task);
-}
-
-/*
- * atl1_set_multi - Multicast and Promiscuous mode set
- * @netdev: network interface device structure
- *
- * The set_multi entry point is called whenever the multicast address
- * list or the network interface flags are updated.  This routine is
- * responsible for configuring the hardware for proper multicast,
- * promiscuous mode, and all-multi behavior.
- */
-static void atl1_set_multi(struct net_device *netdev)
-{
-	struct atl1_adapter *adapter = netdev_priv(netdev);
-	struct atl1_hw *hw = &adapter->hw;
-	struct dev_mc_list *mc_ptr;
-	u32 rctl;
-	u32 hash_value;
-
-	/* Check for Promiscuous and All Multicast modes */
-	rctl = ioread32(hw->hw_addr + REG_MAC_CTRL);
-	if (netdev->flags & IFF_PROMISC)
-		rctl |= MAC_CTRL_PROMIS_EN;
-	else if (netdev->flags & IFF_ALLMULTI) {
-		rctl |= MAC_CTRL_MC_ALL_EN;
-		rctl &= ~MAC_CTRL_PROMIS_EN;
-	} else
-		rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
-
-	iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL);
-
-	/* clear the old settings from the multicast hash table */
-	iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
-	iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
-
-	/* compute mc addresses' hash value ,and put it into hash table */
-	for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
-		hash_value = atl1_hash_mc_addr(hw, mc_ptr->dmi_addr);
-		atl1_hash_set(hw, hash_value);
-	}
-}
-
-/*
- * atl1_change_mtu - Change the Maximum Transfer Unit
- * @netdev: network interface device structure
- * @new_mtu: new value for maximum frame size
- *
- * Returns 0 on success, negative on failure
- */
-static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
-{
-	struct atl1_adapter *adapter = netdev_priv(netdev);
-	int old_mtu = netdev->mtu;
-	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
-
-	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
-	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
-		dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
-		return -EINVAL;
-	}
-
-	adapter->hw.max_frame_size = max_frame;
-	adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3;
-	adapter->rx_buffer_len = (max_frame + 7) & ~7;
-	adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;
-
-	netdev->mtu = new_mtu;
-	if ((old_mtu != new_mtu) && netif_running(netdev)) {
-		atl1_down(adapter);
-		atl1_up(adapter);
-	}
-
 	return 0;
 }
 
@@ -974,37 +1549,6 @@ static void atl1_via_workaround(struct atl1_adapter *adapter)
 	iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND);
 }
 
-/*
- * atl1_irq_enable - Enable default interrupt generation settings
- * @adapter: board private structure
- */
-static void atl1_irq_enable(struct atl1_adapter *adapter)
-{
-	iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR);
-	ioread32(adapter->hw.hw_addr + REG_IMR);
-}
-
-/*
- * atl1_irq_disable - Mask off interrupt generation on the NIC
- * @adapter: board private structure
- */
-static void atl1_irq_disable(struct atl1_adapter *adapter)
-{
-	iowrite32(0, adapter->hw.hw_addr + REG_IMR);
-	ioread32(adapter->hw.hw_addr + REG_IMR);
-	synchronize_irq(adapter->pdev->irq);
-}
-
-static void atl1_clear_phy_int(struct atl1_adapter *adapter)
-{
-	u16 phy_data;
-	unsigned long flags;
-
-	spin_lock_irqsave(&adapter->lock, flags);
-	atl1_read_phy_reg(&adapter->hw, 19, &phy_data);
-	spin_unlock_irqrestore(&adapter->lock, flags);
-}
-
 static void atl1_inc_smb(struct atl1_adapter *adapter)
 {
 	struct stats_msg_block *smb = adapter->smb.smb;
@@ -1076,19 +1620,6 @@ static void atl1_inc_smb(struct atl1_adapter *adapter)
 		adapter->soft_stats.tx_carrier_errors;
 }
 
-/*
- * atl1_get_stats - Get System Network Statistics
- * @netdev: network interface device structure
- *
- * Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
- */
-static struct net_device_stats *atl1_get_stats(struct net_device *netdev)
-{
-	struct atl1_adapter *adapter = netdev_priv(netdev);
-	return &adapter->net_stats;
-}
-
 static void atl1_update_mailbox(struct atl1_adapter *adapter)
 {
 	unsigned long flags;
@@ -1150,8 +1681,9 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
 		if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC |
 					ERR_FLAG_CODE | ERR_FLAG_OV)) {
 			adapter->hw_csum_err++;
-			dev_printk(KERN_DEBUG, &pdev->dev,
-				"rx checksum error\n");
+			if (netif_msg_rx_err(adapter))
+				dev_printk(KERN_DEBUG, &pdev->dev,
+					"rx checksum error\n");
 			return;
 		}
 	}
@@ -1170,9 +1702,10 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
 	}
 
 	/* IPv4, but hardware thinks its checksum is wrong */
-	dev_printk(KERN_DEBUG, &pdev->dev,
-		"hw csum wrong, pkt_flag:%x, err_flag:%x\n",
-		rrd->pkt_flg, rrd->err_flg);
+	if (netif_msg_rx_err(adapter))
+		dev_printk(KERN_DEBUG, &pdev->dev,
+			"hw csum wrong, pkt_flag:%x, err_flag:%x\n",
+			rrd->pkt_flg, rrd->err_flg);
 	skb->ip_summed = CHECKSUM_COMPLETE;
 	skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum);
 	adapter->hw_csum_err++;
@@ -1210,7 +1743,8 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
 		rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);
 
 		skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
-		if (unlikely(!skb)) {	/* Better luck next round */
+		if (unlikely(!skb)) {
+			/* Better luck next round */
 			adapter->net_stats.rx_dropped++;
 			break;
 		}
@@ -1281,18 +1815,39 @@ chk_rrd:
 			/* check rrd status */
 			if (likely(rrd->num_buf == 1))
 				goto rrd_ok;
+			else if (netif_msg_rx_err(adapter)) {
+				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+					"unexpected RRD buffer count\n");
+				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+					"rx_buf_len = %d\n",
+					adapter->rx_buffer_len);
+				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+					"RRD num_buf = %d\n",
+					rrd->num_buf);
+				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+					"RRD pkt_len = %d\n",
+					rrd->xsz.xsum_sz.pkt_size);
+				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+					"RRD pkt_flg = 0x%08X\n",
+					rrd->pkt_flg);
+				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+					"RRD err_flg = 0x%08X\n",
+					rrd->err_flg);
+				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+					"RRD vlan_tag = 0x%08X\n",
+					rrd->vlan_tag);
+			}
 
 			/* rrd seems to be bad */
 			if (unlikely(i-- > 0)) {
 				/* rrd may not be DMAed completely */
-				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
-					"incomplete RRD DMA transfer\n");
 				udelay(1);
 				goto chk_rrd;
 			}
 			/* bad rrd */
-			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
-				"bad RRD\n");
+			if (netif_msg_rx_err(adapter))
+				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+					"bad RRD\n");
 			/* see if update RFD index */
 			if (rrd->num_buf > 1)
 				atl1_update_rfd_index(adapter, rrd);
@@ -1411,8 +1966,6 @@ static void atl1_intr_tx(struct atl1_adapter *adapter)
 			dev_kfree_skb_irq(buffer_info->skb);
 			buffer_info->skb = NULL;
 		}
-		tpd->buffer_addr = 0;
-		tpd->desc.data = 0;
 
 		if (++sw_tpd_next_to_clean == tpd_ring->count)
 			sw_tpd_next_to_clean = 0;
@@ -1434,167 +1987,192 @@ static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
 }
 
 static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
-			 struct tso_param *tso)
+	struct tx_packet_desc *ptpd)
 {
-	/* We enter this function holding a spinlock. */
-	u8 ipofst;
+	/* spinlock held */
+	u8 hdr_len, ip_off;
+	u32 real_len;
 	int err;
 
 	if (skb_shinfo(skb)->gso_size) {
 		if (skb_header_cloned(skb)) {
 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
 			if (unlikely(err))
-				return err;
+				return -1;
 		}
 
 		if (skb->protocol == ntohs(ETH_P_IP)) {
 			struct iphdr *iph = ip_hdr(skb);
 
-			iph->tot_len = 0;
+			real_len = (((unsigned char *)iph - skb->data) +
+				ntohs(iph->tot_len));
+			if (real_len < skb->len)
+				pskb_trim(skb, real_len);
+			hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+			if (skb->len == hdr_len) {
+				iph->check = 0;
+				tcp_hdr(skb)->check =
+					~csum_tcpudp_magic(iph->saddr,
+					iph->daddr, tcp_hdrlen(skb),
+					IPPROTO_TCP, 0);
+				ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) <<
+					TPD_IPHL_SHIFT;
+				ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
+					TPD_TCPHDRLEN_MASK) <<
+					TPD_TCPHDRLEN_SHIFT;
+				ptpd->word3 |= 1 << TPD_IP_CSUM_SHIFT;
+				ptpd->word3 |= 1 << TPD_TCP_CSUM_SHIFT;
+				return 1;
+			}
+
 			iph->check = 0;
 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-				iph->daddr, 0, IPPROTO_TCP, 0);
-			ipofst = skb_network_offset(skb);
-			if (ipofst != ETH_HLEN) /* 802.3 frame */
-				tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT;
-
-			tso->tsopl |= (iph->ihl &
-				CSUM_PARAM_IPHL_MASK) << CSUM_PARAM_IPHL_SHIFT;
-			tso->tsopl |= (tcp_hdrlen(skb) &
-				TSO_PARAM_TCPHDRLEN_MASK) <<
-				TSO_PARAM_TCPHDRLEN_SHIFT;
-			tso->tsopl |= (skb_shinfo(skb)->gso_size &
-				TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT;
-			tso->tsopl |= 1 << TSO_PARAM_IPCKSUM_SHIFT;
-			tso->tsopl |= 1 << TSO_PARAM_TCPCKSUM_SHIFT;
-			tso->tsopl |= 1 << TSO_PARAM_SEGMENT_SHIFT;
-			return true;
+					iph->daddr, 0, IPPROTO_TCP, 0);
+			ip_off = (unsigned char *)iph -
+				(unsigned char *) skb_network_header(skb);
+			if (ip_off == 8) /* 802.3-SNAP frame */
+				ptpd->word3 |= 1 << TPD_ETHTYPE_SHIFT;
+			else if (ip_off != 0)
+				return -2;
+
+			ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) <<
+				TPD_IPHL_SHIFT;
+			ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
+				TPD_TCPHDRLEN_MASK) << TPD_TCPHDRLEN_SHIFT;
+			ptpd->word3 |= (skb_shinfo(skb)->gso_size &
+				TPD_MSS_MASK) << TPD_MSS_SHIFT;
+			ptpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT;
+			return 3;
 		}
 	}
 	return false;
 }
 
 static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
-	struct csum_param *csum)
+	struct tx_packet_desc *ptpd)
 {
 	u8 css, cso;
 
 	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
-		cso = skb_transport_offset(skb);
-		css = cso + skb->csum_offset;
-		if (unlikely(cso & 0x1)) {
-			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
-				"payload offset not an even number\n");
+		css = (u8) (skb->csum_start - skb_headroom(skb));
+		cso = css + (u8) skb->csum_offset;
+		if (unlikely(css & 0x1)) {
+			/* L1 hardware requires an even number here */
+			if (netif_msg_tx_err(adapter))
+				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+					"payload offset not an even number\n");
 			return -1;
 		}
-		csum->csumpl |= (cso & CSUM_PARAM_PLOADOFFSET_MASK) <<
-			CSUM_PARAM_PLOADOFFSET_SHIFT;
-		csum->csumpl |= (css & CSUM_PARAM_XSUMOFFSET_MASK) <<
-			CSUM_PARAM_XSUMOFFSET_SHIFT;
-		csum->csumpl |= 1 << CSUM_PARAM_CUSTOMCKSUM_SHIFT;
+		ptpd->word3 |= (css & TPD_PLOADOFFSET_MASK) <<
+			TPD_PLOADOFFSET_SHIFT;
+		ptpd->word3 |= (cso & TPD_CCSUMOFFSET_MASK) <<
+			TPD_CCSUMOFFSET_SHIFT;
+		ptpd->word3 |= 1 << TPD_CUST_CSUM_EN_SHIFT;
 		return true;
 	}
-
-	return true;
+	return 0;
 }
 
 static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
-	bool tcp_seg)
+	struct tx_packet_desc *ptpd)
 {
-	/* We enter this function holding a spinlock. */
+	/* spinlock held */
 	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
 	struct atl1_buffer *buffer_info;
+	u16 buf_len = skb->len;
 	struct page *page;
-	int first_buf_len = skb->len;
 	unsigned long offset;
 	unsigned int nr_frags;
 	unsigned int f;
-	u16 tpd_next_to_use;
-	u16 proto_hdr_len;
-	u16 len12;
+	int retval;
+	u16 next_to_use;
+	u16 data_len;
+	u8 hdr_len;
 
-	first_buf_len -= skb->data_len;
+	buf_len -= skb->data_len;
 	nr_frags = skb_shinfo(skb)->nr_frags;
-	tpd_next_to_use = atomic_read(&tpd_ring->next_to_use);
-	buffer_info = &tpd_ring->buffer_info[tpd_next_to_use];
+	next_to_use = atomic_read(&tpd_ring->next_to_use);
+	buffer_info = &tpd_ring->buffer_info[next_to_use];
 	if (unlikely(buffer_info->skb))
 		BUG();
-	buffer_info->skb = NULL;	/* put skb in last TPD */
-
-	if (tcp_seg) {
-		/* TSO/GSO */
-		proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
-		buffer_info->length = proto_hdr_len;
+	/* put skb in last TPD */
+	buffer_info->skb = NULL;
+
+	retval = (ptpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
+	if (retval) {
+		/* TSO */
+		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		buffer_info->length = hdr_len;
 		page = virt_to_page(skb->data);
 		offset = (unsigned long)skb->data & ~PAGE_MASK;
 		buffer_info->dma = pci_map_page(adapter->pdev, page,
-						offset, proto_hdr_len,
+						offset, hdr_len,
 						PCI_DMA_TODEVICE);
 
-		if (++tpd_next_to_use == tpd_ring->count)
-			tpd_next_to_use = 0;
+		if (++next_to_use == tpd_ring->count)
+			next_to_use = 0;
 
-		if (first_buf_len > proto_hdr_len) {
-			int i, m;
+		if (buf_len > hdr_len) {
+			int i, nseg;
 
-			len12 = first_buf_len - proto_hdr_len;
-			m = (len12 + ATL1_MAX_TX_BUF_LEN - 1) /
+			data_len = buf_len - hdr_len;
+			nseg = (data_len + ATL1_MAX_TX_BUF_LEN - 1) /
 				ATL1_MAX_TX_BUF_LEN;
-			for (i = 0; i < m; i++) {
+			for (i = 0; i < nseg; i++) {
 				buffer_info =
-				    &tpd_ring->buffer_info[tpd_next_to_use];
+				    &tpd_ring->buffer_info[next_to_use];
 				buffer_info->skb = NULL;
 				buffer_info->length =
 				    (ATL1_MAX_TX_BUF_LEN >=
-				     len12) ? ATL1_MAX_TX_BUF_LEN : len12;
-				len12 -= buffer_info->length;
+				     data_len) ? ATL1_MAX_TX_BUF_LEN : data_len;
+				data_len -= buffer_info->length;
 				page = virt_to_page(skb->data +
-					(proto_hdr_len +
-					i * ATL1_MAX_TX_BUF_LEN));
+					(hdr_len + i * ATL1_MAX_TX_BUF_LEN));
 				offset = (unsigned long)(skb->data +
-					(proto_hdr_len +
-					i * ATL1_MAX_TX_BUF_LEN)) & ~PAGE_MASK;
+					(hdr_len + i * ATL1_MAX_TX_BUF_LEN)) &
+					~PAGE_MASK;
 				buffer_info->dma = pci_map_page(adapter->pdev,
 					page, offset, buffer_info->length,
 					PCI_DMA_TODEVICE);
-				if (++tpd_next_to_use == tpd_ring->count)
-					tpd_next_to_use = 0;
+				if (++next_to_use == tpd_ring->count)
+					next_to_use = 0;
 			}
 		}
 	} else {
-		/* not TSO/GSO */
-		buffer_info->length = first_buf_len;
+		/* not TSO */
+		buffer_info->length = buf_len;
 		page = virt_to_page(skb->data);
 		offset = (unsigned long)skb->data & ~PAGE_MASK;
 		buffer_info->dma = pci_map_page(adapter->pdev, page,
-			offset, first_buf_len, PCI_DMA_TODEVICE);
-		if (++tpd_next_to_use == tpd_ring->count)
-			tpd_next_to_use = 0;
+			offset, buf_len, PCI_DMA_TODEVICE);
+		if (++next_to_use == tpd_ring->count)
+			next_to_use = 0;
 	}
 
 	for (f = 0; f < nr_frags; f++) {
 		struct skb_frag_struct *frag;
-		u16 lenf, i, m;
+		u16 i, nseg;
 
 		frag = &skb_shinfo(skb)->frags[f];
-		lenf = frag->size;
+		buf_len = frag->size;
 
-		m = (lenf + ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN;
-		for (i = 0; i < m; i++) {
-			buffer_info = &tpd_ring->buffer_info[tpd_next_to_use];
+		nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) /
+			ATL1_MAX_TX_BUF_LEN;
+		for (i = 0; i < nseg; i++) {
+			buffer_info = &tpd_ring->buffer_info[next_to_use];
 			if (unlikely(buffer_info->skb))
 				BUG();
 			buffer_info->skb = NULL;
-			buffer_info->length = (lenf > ATL1_MAX_TX_BUF_LEN) ?
-				ATL1_MAX_TX_BUF_LEN : lenf;
-			lenf -= buffer_info->length;
+			buffer_info->length = (buf_len > ATL1_MAX_TX_BUF_LEN) ?
+				ATL1_MAX_TX_BUF_LEN : buf_len;
+			buf_len -= buffer_info->length;
 			buffer_info->dma = pci_map_page(adapter->pdev,
 				frag->page,
 				frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN),
 				buffer_info->length, PCI_DMA_TODEVICE);
 
-			if (++tpd_next_to_use == tpd_ring->count)
-				tpd_next_to_use = 0;
+			if (++next_to_use == tpd_ring->count)
+				next_to_use = 0;
 		}
 	}
 
@@ -1602,39 +2180,44 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
 	buffer_info->skb = skb;
 }
 
-static void atl1_tx_queue(struct atl1_adapter *adapter, int count,
-       union tpd_descr *descr)
+static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count,
+       struct tx_packet_desc *ptpd)
 {
-	/* We enter this function holding a spinlock. */
+	/* spinlock held */
 	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
-	int j;
-	u32 val;
 	struct atl1_buffer *buffer_info;
 	struct tx_packet_desc *tpd;
-	u16 tpd_next_to_use = atomic_read(&tpd_ring->next_to_use);
+	u16 j;
+	u32 val;
+	u16 next_to_use = (u16) atomic_read(&tpd_ring->next_to_use);
 
 	for (j = 0; j < count; j++) {
-		buffer_info = &tpd_ring->buffer_info[tpd_next_to_use];
-		tpd = ATL1_TPD_DESC(&adapter->tpd_ring, tpd_next_to_use);
-		tpd->desc.csum.csumpu = descr->csum.csumpu;
-		tpd->desc.csum.csumpl = descr->csum.csumpl;
-		tpd->desc.tso.tsopu = descr->tso.tsopu;
-		tpd->desc.tso.tsopl = descr->tso.tsopl;
+		buffer_info = &tpd_ring->buffer_info[next_to_use];
+		tpd = ATL1_TPD_DESC(&adapter->tpd_ring, next_to_use);
+		if (tpd != ptpd)
+			memcpy(tpd, ptpd, sizeof(struct tx_packet_desc));
 		tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
-		tpd->desc.data = descr->data;
-		tpd->desc.csum.csumpu |= (cpu_to_le16(buffer_info->length) &
-			CSUM_PARAM_BUFLEN_MASK) << CSUM_PARAM_BUFLEN_SHIFT;
+		tpd->word2 = (cpu_to_le16(buffer_info->length) &
+			TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT;
 
-		val = (descr->tso.tsopl >> TSO_PARAM_SEGMENT_SHIFT) &
-			TSO_PARAM_SEGMENT_MASK;
-		if (val && !j)
-			tpd->desc.tso.tsopl |= 1 << TSO_PARAM_HDRFLAG_SHIFT;
+		/*
+		 * if this is the first packet in a TSO chain, set
+		 * TPD_HDRFLAG, otherwise, clear it.
+		 */
+		val = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) &
+			TPD_SEGMENT_EN_MASK;
+		if (val) {
+			if (!j)
+				tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT;
+			else
+				tpd->word3 &= ~(1 << TPD_HDRFLAG_SHIFT);
+		}
 
 		if (j == (count - 1))
-			tpd->desc.csum.csumpl |= 1 << CSUM_PARAM_EOP_SHIFT;
+			tpd->word3 |= 1 << TPD_EOP_SHIFT;
 
-		if (++tpd_next_to_use == tpd_ring->count)
-			tpd_next_to_use = 0;
+		if (++next_to_use == tpd_ring->count)
+			next_to_use = 0;
 	}
 	/*
 	 * Force memory writes to complete before letting h/w
@@ -1644,18 +2227,18 @@ static void atl1_tx_queue(struct atl1_adapter *adapter, int count,
 	 */
 	wmb();
 
-	atomic_set(&tpd_ring->next_to_use, (int)tpd_next_to_use);
+	atomic_set(&tpd_ring->next_to_use, next_to_use);
 }
 
 static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct atl1_adapter *adapter = netdev_priv(netdev);
+	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
 	int len = skb->len;
 	int tso;
 	int count = 1;
 	int ret_val;
-	u32 val;
-	union tpd_descr param;
+	struct tx_packet_desc *ptpd;
 	u16 frag_size;
 	u16 vlan_tag;
 	unsigned long flags;
@@ -1666,18 +2249,11 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 	len -= skb->data_len;
 
-	if (unlikely(skb->len == 0)) {
+	if (unlikely(skb->len <= 0)) {
 		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
 
-	param.data = 0;
-	param.tso.tsopu = 0;
-	param.tso.tsopl = 0;
-	param.csum.csumpu = 0;
-	param.csum.csumpl = 0;
-
-	/* nr_frags will be nonzero if we're doing scatter/gather (SG) */
 	nr_frags = skb_shinfo(skb)->nr_frags;
 	for (f = 0; f < nr_frags; f++) {
 		frag_size = skb_shinfo(skb)->frags[f].size;
@@ -1686,10 +2262,9 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 				ATL1_MAX_TX_BUF_LEN;
 	}
 
-	/* mss will be nonzero if we're doing segment offload (TSO/GSO) */
 	mss = skb_shinfo(skb)->gso_size;
 	if (mss) {
-		if (skb->protocol == htons(ETH_P_IP)) {
+		if (skb->protocol == ntohs(ETH_P_IP)) {
 			proto_hdr_len = (skb_transport_offset(skb) +
 					 tcp_hdrlen(skb));
 			if (unlikely(proto_hdr_len > len)) {
@@ -1706,7 +2281,9 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 	if (!spin_trylock_irqsave(&adapter->lock, flags)) {
 		/* Can't get lock - tell upper layer to requeue */
-		dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx locked\n");
+		if (netif_msg_tx_queued(adapter))
+			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+				"tx locked\n");
 		return NETDEV_TX_LOCKED;
 	}
 
@@ -1714,22 +2291,26 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		/* not enough descriptors */
 		netif_stop_queue(netdev);
 		spin_unlock_irqrestore(&adapter->lock, flags);
-		dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx busy\n");
+		if (netif_msg_tx_queued(adapter))
+			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+				"tx busy\n");
 		return NETDEV_TX_BUSY;
 	}
 
-	param.data = 0;
+	ptpd = ATL1_TPD_DESC(tpd_ring,
+		(u16) atomic_read(&tpd_ring->next_to_use));
+	memset(ptpd, 0, sizeof(struct tx_packet_desc));
 
 	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
 		vlan_tag = vlan_tx_tag_get(skb);
 		vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
 			((vlan_tag >> 9) & 0x8);
-		param.csum.csumpl |= 1 << CSUM_PARAM_INSVLAG_SHIFT;
-		param.csum.csumpu |= (vlan_tag & CSUM_PARAM_VALANTAG_MASK) <<
-			CSUM_PARAM_VALAN_SHIFT;
+		ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
+		ptpd->word3 |= (vlan_tag & TPD_VL_TAGGED_MASK) <<
+			TPD_VL_TAGGED_SHIFT;
 	}
 
-	tso = atl1_tso(adapter, skb, &param.tso);
+	tso = atl1_tso(adapter, skb, ptpd);
 	if (tso < 0) {
 		spin_unlock_irqrestore(&adapter->lock, flags);
 		dev_kfree_skb_any(skb);
@@ -1737,7 +2318,7 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	}
 
 	if (!tso) {
-		ret_val = atl1_tx_csum(adapter, skb, &param.csum);
+		ret_val = atl1_tx_csum(adapter, skb, ptpd);
 		if (ret_val < 0) {
 			spin_unlock_irqrestore(&adapter->lock, flags);
 			dev_kfree_skb_any(skb);
@@ -1745,13 +2326,11 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		}
 	}
 
-	val = (param.csum.csumpl >> CSUM_PARAM_SEGMENT_SHIFT) &
-		CSUM_PARAM_SEGMENT_MASK;
-	atl1_tx_map(adapter, skb, 1 == val);
-	atl1_tx_queue(adapter, count, &param);
-	netdev->trans_start = jiffies;
-	spin_unlock_irqrestore(&adapter->lock, flags);
+	atl1_tx_map(adapter, skb, ptpd);
+	atl1_tx_queue(adapter, count, ptpd);
 	atl1_update_mailbox(adapter);
+	spin_unlock_irqrestore(&adapter->lock, flags);
+	netdev->trans_start = jiffies;
 	return NETDEV_TX_OK;
 }
 
@@ -1776,7 +2355,7 @@ static irqreturn_t atl1_intr(int irq, void *data)
 		adapter->cmb.cmb->int_stats = 0;
 
 		if (status & ISR_GPHY)	/* clear phy status */
-			atl1_clear_phy_int(adapter);
+			atlx_clear_phy_int(adapter);
 
 		/* clear ISR status, and Enable CMB DMA/Disable Interrupt */
 		iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);
@@ -1787,8 +2366,9 @@ static irqreturn_t atl1_intr(int irq, void *data)
 
 		/* check if PCIE PHY Link down */
 		if (status & ISR_PHY_LINKDOWN) {
-			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
-				"pcie phy link down %x\n", status);
+			if (netif_msg_intr(adapter))
+				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+					"pcie phy link down %x\n", status);
 			if (netif_running(adapter->netdev)) {	/* reset MAC */
 				iowrite32(0, adapter->hw.hw_addr + REG_IMR);
 				schedule_work(&adapter->pcie_dma_to_rst_task);
@@ -1798,9 +2378,10 @@ static irqreturn_t atl1_intr(int irq, void *data)
 
 		/* check if DMA read/write error ? */
 		if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
-			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
-				"pcie DMA r/w error (status = 0x%x)\n",
-				status);
+			if (netif_msg_intr(adapter))
+				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+					"pcie DMA r/w error (status = 0x%x)\n",
+					status);
 			iowrite32(0, adapter->hw.hw_addr + REG_IMR);
 			schedule_work(&adapter->pcie_dma_to_rst_task);
 			return IRQ_HANDLED;
@@ -1823,8 +2404,11 @@ static irqreturn_t atl1_intr(int irq, void *data)
 			if (status & (ISR_RXF_OV | ISR_RFD_UNRUN |
 				ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
 				ISR_HOST_RRD_OV))
-				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
-					"rx exception, ISR = 0x%x\n", status);
+				if (netif_msg_intr(adapter))
+					dev_printk(KERN_DEBUG,
+						&adapter->pdev->dev,
+						"rx exception, ISR = 0x%x\n",
+						status);
 			atl1_intr_rx(adapter);
 		}
 
@@ -1856,119 +2440,51 @@ static void atl1_watchdog(unsigned long data)
  */
 static void atl1_phy_config(unsigned long data)
 {
-	struct atl1_adapter *adapter = (struct atl1_adapter *)data;
-	struct atl1_hw *hw = &adapter->hw;
-	unsigned long flags;
-
-	spin_lock_irqsave(&adapter->lock, flags);
-	adapter->phy_timer_pending = false;
-	atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
-	atl1_write_phy_reg(hw, MII_AT001_CR, hw->mii_1000t_ctrl_reg);
-	atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN);
-	spin_unlock_irqrestore(&adapter->lock, flags);
-}
-
-/*
- * atl1_tx_timeout - Respond to a Tx Hang
- * @netdev: network interface device structure
- */
-static void atl1_tx_timeout(struct net_device *netdev)
-{
-	struct atl1_adapter *adapter = netdev_priv(netdev);
-	/* Do the reset outside of interrupt context */
-	schedule_work(&adapter->tx_timeout_task);
-}
-
-/*
- * Orphaned vendor comment left intact here:
- * <vendor comment>
- * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT
- * will assert. We do soft reset <0x1400=1> according
- * with the SPEC. BUT, it seemes that PCIE or DMA
- * state-machine will not be reset. DMAR_TO_INT will
- * assert again and again.
- * </vendor comment>
- */
-static void atl1_tx_timeout_task(struct work_struct *work)
-{
-	struct atl1_adapter *adapter =
-		container_of(work, struct atl1_adapter, tx_timeout_task);
-	struct net_device *netdev = adapter->netdev;
-
-	netif_device_detach(netdev);
-	atl1_down(adapter);
-	atl1_up(adapter);
-	netif_device_attach(netdev);
-}
-
-/*
- * atl1_link_chg_task - deal with link change event Out of interrupt context
- */
-static void atl1_link_chg_task(struct work_struct *work)
-{
-	struct atl1_adapter *adapter =
-               container_of(work, struct atl1_adapter, link_chg_task);
-	unsigned long flags;
-
-	spin_lock_irqsave(&adapter->lock, flags);
-	atl1_check_link(adapter);
-	spin_unlock_irqrestore(&adapter->lock, flags);
-}
-
-static void atl1_vlan_rx_register(struct net_device *netdev,
-	struct vlan_group *grp)
-{
-	struct atl1_adapter *adapter = netdev_priv(netdev);
+	struct atl1_adapter *adapter = (struct atl1_adapter *)data;
+	struct atl1_hw *hw = &adapter->hw;
 	unsigned long flags;
-	u32 ctrl;
 
 	spin_lock_irqsave(&adapter->lock, flags);
-	/* atl1_irq_disable(adapter); */
-	adapter->vlgrp = grp;
-
-	if (grp) {
-		/* enable VLAN tag insert/strip */
-		ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
-		ctrl |= MAC_CTRL_RMV_VLAN;
-		iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
-	} else {
-		/* disable VLAN tag insert/strip */
-		ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
-		ctrl &= ~MAC_CTRL_RMV_VLAN;
-		iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
-	}
-
-	/* atl1_irq_enable(adapter); */
+	adapter->phy_timer_pending = false;
+	atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
+	atl1_write_phy_reg(hw, MII_ATLX_CR, hw->mii_1000t_ctrl_reg);
+	atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN);
 	spin_unlock_irqrestore(&adapter->lock, flags);
 }
 
-static void atl1_restore_vlan(struct atl1_adapter *adapter)
-{
-	atl1_vlan_rx_register(adapter->netdev, adapter->vlgrp);
-}
+/*
+ * Orphaned vendor comment left intact here:
+ * <vendor comment>
+ * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT
+ * will assert. We do soft reset <0x1400=1> according
+ * with the SPEC. BUT, it seemes that PCIE or DMA
+ * state-machine will not be reset. DMAR_TO_INT will
+ * assert again and again.
+ * </vendor comment>
+ */
 
-int atl1_reset(struct atl1_adapter *adapter)
+static int atl1_reset(struct atl1_adapter *adapter)
 {
 	int ret;
-
 	ret = atl1_reset_hw(&adapter->hw);
-	if (ret != ATL1_SUCCESS)
+	if (ret)
 		return ret;
 	return atl1_init_hw(&adapter->hw);
 }
 
-s32 atl1_up(struct atl1_adapter *adapter)
+static s32 atl1_up(struct atl1_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
 	int err;
 	int irq_flags = IRQF_SAMPLE_RANDOM;
 
 	/* hardware has been reset, we need to reload some things */
-	atl1_set_multi(netdev);
+	atlx_set_multi(netdev);
 	atl1_init_ring_ptrs(adapter);
-	atl1_restore_vlan(adapter);
+	atlx_restore_vlan(adapter);
 	err = atl1_alloc_rx_buffers(adapter);
-	if (unlikely(!err))		/* no RX BUFFER allocated */
+	if (unlikely(!err))
+		/* no RX BUFFER allocated */
 		return -ENOMEM;
 
 	if (unlikely(atl1_configure(adapter))) {
@@ -1978,8 +2494,9 @@ s32 atl1_up(struct atl1_adapter *adapter)
 
 	err = pci_enable_msi(adapter->pdev);
 	if (err) {
-		dev_info(&adapter->pdev->dev,
-			"Unable to enable MSI: %d\n", err);
+		if (netif_msg_ifup(adapter))
+			dev_info(&adapter->pdev->dev,
+				"Unable to enable MSI: %d\n", err);
 		irq_flags |= IRQF_SHARED;
 	}
 
@@ -1989,7 +2506,7 @@ s32 atl1_up(struct atl1_adapter *adapter)
 		goto err_up;
 
 	mod_timer(&adapter->watchdog_timer, jiffies);
-	atl1_irq_enable(adapter);
+	atlx_irq_enable(adapter);
 	atl1_check_link(adapter);
 	return 0;
 
@@ -2000,7 +2517,7 @@ err_up:
 	return err;
 }
 
-void atl1_down(struct atl1_adapter *adapter)
+static void atl1_down(struct atl1_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
 
@@ -2008,7 +2525,7 @@ void atl1_down(struct atl1_adapter *adapter)
 	del_timer_sync(&adapter->phy_config_timer);
 	adapter->phy_timer_pending = false;
 
-	atl1_irq_disable(adapter);
+	atlx_irq_disable(adapter);
 	free_irq(adapter->pdev->irq, netdev);
 	pci_disable_msi(adapter->pdev);
 	atl1_reset_hw(&adapter->hw);
@@ -2023,6 +2540,52 @@ void atl1_down(struct atl1_adapter *adapter)
 	atl1_clean_rx_ring(adapter);
 }
 
+static void atl1_tx_timeout_task(struct work_struct *work)
+{
+	struct atl1_adapter *adapter =
+		container_of(work, struct atl1_adapter, tx_timeout_task);
+	struct net_device *netdev = adapter->netdev;
+
+	netif_device_detach(netdev);
+	atl1_down(adapter);
+	atl1_up(adapter);
+	netif_device_attach(netdev);
+}
+
+/*
+ * atl1_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct atl1_adapter *adapter = netdev_priv(netdev);
+	int old_mtu = netdev->mtu;
+	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
+	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
+	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
+		if (netif_msg_link(adapter))
+			dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
+		return -EINVAL;
+	}
+
+	adapter->hw.max_frame_size = max_frame;
+	adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3;
+	adapter->rx_buffer_len = (max_frame + 7) & ~7;
+	adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;
+
+	netdev->mtu = new_mtu;
+	if ((old_mtu != new_mtu) && netif_running(netdev)) {
+		atl1_down(adapter);
+		atl1_up(adapter);
+	}
+
+	return 0;
+}
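A quick worked example of the sizing above, assuming the usual header constants (ETH_HLEN = 14, ETH_FCS_LEN = 4, VLAN_HLEN = 4) and the standard Ethernet MTU; the values follow directly from the expressions in atl1_change_mtu():

	new_mtu          = 1500
	max_frame        = 1500 + 14 + 4 + 4 = 1522   (between ETH_ZLEN + ETH_FCS_LEN and MAX_JUMBO_FRAME_SIZE, so accepted)
	tx_jumbo_task_th = (1522 + 7) >> 3   = 191
	rx_buffer_len    = (1522 + 7) & ~7   = 1528   (rounded up to the next multiple of 8)
	rx_jumbo_th      = 1528 / 8          = 191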
+
 /*
  * atl1_open - Called when a network interface is made active
  * @netdev: network interface device structure
@@ -2091,7 +2654,7 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
 	atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
 	atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
 	if (ctrl & BMSR_LSTATUS)
-		wufc &= ~ATL1_WUFC_LNKC;
+		wufc &= ~ATLX_WUFC_LNKC;
 
 	/* reduce speed to 10/100M */
 	if (wufc) {
@@ -2099,15 +2662,15 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
 		/* if resume, let driver to re- setup link */
 		hw->phy_configured = false;
 		atl1_set_mac_addr(hw);
-		atl1_set_multi(netdev);
+		atlx_set_multi(netdev);
 
 		ctrl = 0;
 		/* turn on magic packet wol */
-		if (wufc & ATL1_WUFC_MAG)
+		if (wufc & ATLX_WUFC_MAG)
 			ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
 
 		/* turn on Link change WOL */
-		if (wufc & ATL1_WUFC_LNKC)
+		if (wufc & ATLX_WUFC_LNKC)
 			ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
 		iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
 
@@ -2115,13 +2678,13 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
 		ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL);
 		ctrl &= ~MAC_CTRL_DBG;
 		ctrl &= ~MAC_CTRL_PROMIS_EN;
-		if (wufc & ATL1_WUFC_MC)
+		if (wufc & ATLX_WUFC_MC)
 			ctrl |= MAC_CTRL_MC_ALL_EN;
 		else
 			ctrl &= ~MAC_CTRL_MC_ALL_EN;
 
 		/* turn on broadcast mode if wake on-BC is enabled */
-		if (wufc & ATL1_WUFC_BC)
+		if (wufc & ATLX_WUFC_BC)
 			ctrl |= MAC_CTRL_BC_EN;
 		else
 			ctrl &= ~MAC_CTRL_BC_EN;
@@ -2149,12 +2712,13 @@ static int atl1_resume(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct atl1_adapter *adapter = netdev_priv(netdev);
-	u32 ret_val;
+	u32 err;
 
-	pci_set_power_state(pdev, 0);
+	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
 
-	ret_val = pci_enable_device(pdev);
+	/* FIXME: check and handle */
+	err = pci_enable_device(pdev);
 	pci_enable_wake(pdev, PCI_D3hot, 0);
 	pci_enable_wake(pdev, PCI_D3cold, 0);
 
@@ -2221,14 +2785,16 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
 		dev_err(&pdev->dev, "no usable DMA configuration\n");
 		goto err_dma;
 	}
-	/* Mark all PCI regions associated with PCI device
+	/*
+	 * Mark all PCI regions associated with PCI device
 	 * pdev as being reserved by owner atl1_driver_name
 	 */
-	err = pci_request_regions(pdev, atl1_driver_name);
+	err = pci_request_regions(pdev, ATLX_DRIVER_NAME);
 	if (err)
 		goto err_request_regions;
 
-	/* Enables bus-mastering on the device and calls
+	/*
+	 * Enables bus-mastering on the device and calls
 	 * pcibios_set_master to do the needed arch specific settings
 	 */
 	pci_set_master(pdev);
@@ -2245,6 +2811,7 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
 	adapter->netdev = netdev;
 	adapter->pdev = pdev;
 	adapter->hw.back = adapter;
+	adapter->msg_enable = netif_msg_init(debug, atl1_default_msg);
 
 	adapter->hw.hw_addr = pci_iomap(pdev, 0, 0);
 	if (!adapter->hw.hw_addr) {
@@ -2254,7 +2821,8 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
 	/* get device revision number */
 	adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr +
 		(REG_MASTER_CTRL + 2));
-	dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);
+	if (netif_msg_probe(adapter))
+		dev_info(&pdev->dev, "version %s\n", ATLX_DRIVER_VERSION);
 
 	/* set default ring resource counts */
 	adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD;
@@ -2269,17 +2837,17 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
 	netdev->open = &atl1_open;
 	netdev->stop = &atl1_close;
 	netdev->hard_start_xmit = &atl1_xmit_frame;
-	netdev->get_stats = &atl1_get_stats;
-	netdev->set_multicast_list = &atl1_set_multi;
+	netdev->get_stats = &atlx_get_stats;
+	netdev->set_multicast_list = &atlx_set_multi;
 	netdev->set_mac_address = &atl1_set_mac;
 	netdev->change_mtu = &atl1_change_mtu;
-	netdev->do_ioctl = &atl1_ioctl;
-	netdev->tx_timeout = &atl1_tx_timeout;
+	netdev->do_ioctl = &atlx_ioctl;
+	netdev->tx_timeout = &atlx_tx_timeout;
 	netdev->watchdog_timeo = 5 * HZ;
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	netdev->poll_controller = atl1_poll_controller;
 #endif
-	netdev->vlan_rx_register = atl1_vlan_rx_register;
+	netdev->vlan_rx_register = atlx_vlan_rx_register;
 
 	netdev->ethtool_ops = &atl1_ethtool_ops;
 	adapter->bd_number = cards_found;
@@ -2292,13 +2860,7 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
 	netdev->features = NETIF_F_HW_CSUM;
 	netdev->features |= NETIF_F_SG;
 	netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
-
-	/*
-	 * FIXME - Until tso performance gets fixed, disable the feature.
-	 * Enable it with ethtool -K if desired.
-	 */
-	/* netdev->features |= NETIF_F_TSO; */
-
+	netdev->features |= NETIF_F_TSO;
 	netdev->features |= NETIF_F_LLTX;
 
 	/*
@@ -2309,7 +2871,7 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
 	/* atl1_pcie_patch(adapter); */
 
 	/* really reset GPHY core */
-	iowrite16(0, adapter->hw.hw_addr + REG_GPHY_ENABLE);
+	iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);
 
 	/*
 	 * reset the controller to
@@ -2354,7 +2916,7 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
 
 	INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task);
 
-	INIT_WORK(&adapter->link_chg_task, atl1_link_chg_task);
+	INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task);
 
 	INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task);
 
@@ -2397,7 +2959,8 @@ static void __devexit atl1_remove(struct pci_dev *pdev)
 
 	adapter = netdev_priv(netdev);
 
-	/* Some atl1 boards lack persistent storage for their MAC, and get it
+	/*
+	 * Some atl1 boards lack persistent storage for their MAC, and get it
 	 * from the BIOS during POST.  If we've been messing with the MAC
 	 * address, we need to save the permanent one.
 	 */
@@ -2407,7 +2970,7 @@ static void __devexit atl1_remove(struct pci_dev *pdev)
 		atl1_set_mac_addr(&adapter->hw);
 	}
 
-	iowrite16(0, adapter->hw.hw_addr + REG_GPHY_ENABLE);
+	iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);
 	unregister_netdev(netdev);
 	pci_iounmap(pdev, adapter->hw.hw_addr);
 	pci_release_regions(pdev);
@@ -2416,7 +2979,7 @@ static void __devexit atl1_remove(struct pci_dev *pdev)
 }
 
 static struct pci_driver atl1_driver = {
-	.name = atl1_driver_name,
+	.name = ATLX_DRIVER_NAME,
 	.id_table = atl1_pci_tbl,
 	.probe = atl1_probe,
 	.remove = __devexit_p(atl1_remove),
@@ -2448,3 +3011,554 @@ static int __init atl1_init_module(void)
 
 module_init(atl1_init_module);
 module_exit(atl1_exit_module);
+
+struct atl1_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int sizeof_stat;
+	int stat_offset;
+};
+
+#define ATL1_STAT(m) \
+	sizeof(((struct atl1_adapter *)0)->m), offsetof(struct atl1_adapter, m)
+
+static struct atl1_stats atl1_gstrings_stats[] = {
+	{"rx_packets", ATL1_STAT(soft_stats.rx_packets)},
+	{"tx_packets", ATL1_STAT(soft_stats.tx_packets)},
+	{"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)},
+	{"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)},
+	{"rx_errors", ATL1_STAT(soft_stats.rx_errors)},
+	{"tx_errors", ATL1_STAT(soft_stats.tx_errors)},
+	{"rx_dropped", ATL1_STAT(net_stats.rx_dropped)},
+	{"tx_dropped", ATL1_STAT(net_stats.tx_dropped)},
+	{"multicast", ATL1_STAT(soft_stats.multicast)},
+	{"collisions", ATL1_STAT(soft_stats.collisions)},
+	{"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)},
+	{"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
+	{"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)},
+	{"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)},
+	{"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)},
+	{"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
+	{"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)},
+	{"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)},
+	{"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)},
+	{"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)},
+	{"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)},
+	{"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)},
+	{"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)},
+	{"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)},
+	{"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)},
+	{"tx_underun", ATL1_STAT(soft_stats.tx_underun)},
+	{"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)},
+	{"tx_pause", ATL1_STAT(soft_stats.tx_pause)},
+	{"rx_pause", ATL1_STAT(soft_stats.rx_pause)},
+	{"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)},
+	{"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)}
+};
+
+static void atl1_get_ethtool_stats(struct net_device *netdev,
+	struct ethtool_stats *stats, u64 *data)
+{
+	struct atl1_adapter *adapter = netdev_priv(netdev);
+	int i;
+	char *p;
+
+	for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
+		p = (char *)adapter+atl1_gstrings_stats[i].stat_offset;
+		data[i] = (atl1_gstrings_stats[i].sizeof_stat ==
+			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+	}
+
+}
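The stat table above pairs each string with a (size, offset) tuple generated by ATL1_STAT, and atl1_get_ethtool_stats() then reads each counter back through pointer arithmetic on the adapter structure. Below is a small user-space sketch of the same sizeof/offsetof technique; every demo_* name is a hypothetical stand-in used only for illustration.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-ins for the adapter and its embedded counters */
struct demo_stats { uint64_t rx_packets; uint32_t rx_dropped; };
struct demo_adapter { int irq; struct demo_stats stats; };

struct demo_stat_desc {
	const char *name;
	size_t size;	/* sizeof() of the counter field */
	size_t offset;	/* offsetof() of the field within demo_adapter */
};

#define DEMO_STAT(m) \
	sizeof(((struct demo_adapter *)0)->m), offsetof(struct demo_adapter, m)

static const struct demo_stat_desc demo_table[] = {
	{ "rx_packets", DEMO_STAT(stats.rx_packets) },
	{ "rx_dropped", DEMO_STAT(stats.rx_dropped) },
};

int main(void)
{
	struct demo_adapter a = { .stats = { .rx_packets = 42, .rx_dropped = 7 } };
	size_t i;

	for (i = 0; i < sizeof(demo_table) / sizeof(demo_table[0]); i++) {
		/* same walk as atl1_get_ethtool_stats(): base pointer + offset */
		const char *p = (const char *)&a + demo_table[i].offset;
		uint64_t v = (demo_table[i].size == sizeof(uint64_t)) ?
			*(const uint64_t *)p : *(const uint32_t *)p;
		printf("%-12s %llu\n", demo_table[i].name,
			(unsigned long long)v);
	}
	return 0;
}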
+
+static int atl1_get_sset_count(struct net_device *netdev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(atl1_gstrings_stats);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int atl1_get_settings(struct net_device *netdev,
+	struct ethtool_cmd *ecmd)
+{
+	struct atl1_adapter *adapter = netdev_priv(netdev);
+	struct atl1_hw *hw = &adapter->hw;
+
+	ecmd->supported = (SUPPORTED_10baseT_Half |
+			   SUPPORTED_10baseT_Full |
+			   SUPPORTED_100baseT_Half |
+			   SUPPORTED_100baseT_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_Autoneg | SUPPORTED_TP);
+	ecmd->advertising = ADVERTISED_TP;
+	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
+	    hw->media_type == MEDIA_TYPE_1000M_FULL) {
+		ecmd->advertising |= ADVERTISED_Autoneg;
+		if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR) {
+			ecmd->advertising |= ADVERTISED_Autoneg;
+			ecmd->advertising |=
+			    (ADVERTISED_10baseT_Half |
+			     ADVERTISED_10baseT_Full |
+			     ADVERTISED_100baseT_Half |
+			     ADVERTISED_100baseT_Full |
+			     ADVERTISED_1000baseT_Full);
+		} else
+			ecmd->advertising |= (ADVERTISED_1000baseT_Full);
+	}
+	ecmd->port = PORT_TP;
+	ecmd->phy_address = 0;
+	ecmd->transceiver = XCVR_INTERNAL;
+
+	if (netif_carrier_ok(adapter->netdev)) {
+		u16 link_speed, link_duplex;
+		atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex);
+		ecmd->speed = link_speed;
+		if (link_duplex == FULL_DUPLEX)
+			ecmd->duplex = DUPLEX_FULL;
+		else
+			ecmd->duplex = DUPLEX_HALF;
+	} else {
+		ecmd->speed = -1;
+		ecmd->duplex = -1;
+	}
+	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
+	    hw->media_type == MEDIA_TYPE_1000M_FULL)
+		ecmd->autoneg = AUTONEG_ENABLE;
+	else
+		ecmd->autoneg = AUTONEG_DISABLE;
+
+	return 0;
+}
+
+static int atl1_set_settings(struct net_device *netdev,
+	struct ethtool_cmd *ecmd)
+{
+	struct atl1_adapter *adapter = netdev_priv(netdev);
+	struct atl1_hw *hw = &adapter->hw;
+	u16 phy_data;
+	int ret_val = 0;
+	u16 old_media_type = hw->media_type;
+
+	if (netif_running(adapter->netdev)) {
+		if (netif_msg_link(adapter))
+			dev_dbg(&adapter->pdev->dev,
+				"ethtool shutting down adapter\n");
+		atl1_down(adapter);
+	}
+
+	if (ecmd->autoneg == AUTONEG_ENABLE)
+		hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
+	else {
+		if (ecmd->speed == SPEED_1000) {
+			if (ecmd->duplex != DUPLEX_FULL) {
+				if (netif_msg_link(adapter))
+					dev_warn(&adapter->pdev->dev,
+						"1000M half is invalid\n");
+				ret_val = -EINVAL;
+				goto exit_sset;
+			}
+			hw->media_type = MEDIA_TYPE_1000M_FULL;
+		} else if (ecmd->speed == SPEED_100) {
+			if (ecmd->duplex == DUPLEX_FULL)
+				hw->media_type = MEDIA_TYPE_100M_FULL;
+			else
+				hw->media_type = MEDIA_TYPE_100M_HALF;
+		} else {
+			if (ecmd->duplex == DUPLEX_FULL)
+				hw->media_type = MEDIA_TYPE_10M_FULL;
+			else
+				hw->media_type = MEDIA_TYPE_10M_HALF;
+		}
+	}
+	switch (hw->media_type) {
+	case MEDIA_TYPE_AUTO_SENSOR:
+		ecmd->advertising =
+		    ADVERTISED_10baseT_Half |
+		    ADVERTISED_10baseT_Full |
+		    ADVERTISED_100baseT_Half |
+		    ADVERTISED_100baseT_Full |
+		    ADVERTISED_1000baseT_Full |
+		    ADVERTISED_Autoneg | ADVERTISED_TP;
+		break;
+	case MEDIA_TYPE_1000M_FULL:
+		ecmd->advertising =
+		    ADVERTISED_1000baseT_Full |
+		    ADVERTISED_Autoneg | ADVERTISED_TP;
+		break;
+	default:
+		ecmd->advertising = 0;
+		break;
+	}
+	if (atl1_phy_setup_autoneg_adv(hw)) {
+		ret_val = -EINVAL;
+		if (netif_msg_link(adapter))
+			dev_warn(&adapter->pdev->dev,
+				"invalid ethtool speed/duplex setting\n");
+		goto exit_sset;
+	}
+	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
+	    hw->media_type == MEDIA_TYPE_1000M_FULL)
+		phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
+	else {
+		switch (hw->media_type) {
+		case MEDIA_TYPE_100M_FULL:
+			phy_data =
+			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
+			    MII_CR_RESET;
+			break;
+		case MEDIA_TYPE_100M_HALF:
+			phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
+			break;
+		case MEDIA_TYPE_10M_FULL:
+			phy_data =
+			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
+			break;
+		default:
+			/* MEDIA_TYPE_10M_HALF: */
+			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
+			break;
+		}
+	}
+	atl1_write_phy_reg(hw, MII_BMCR, phy_data);
+exit_sset:
+	if (ret_val)
+		hw->media_type = old_media_type;
+
+	if (netif_running(adapter->netdev)) {
+		if (netif_msg_link(adapter))
+			dev_dbg(&adapter->pdev->dev,
+				"ethtool starting adapter\n");
+		atl1_up(adapter);
+	} else if (!ret_val) {
+		if (netif_msg_link(adapter))
+			dev_dbg(&adapter->pdev->dev,
+				"ethtool resetting adapter\n");
+		atl1_reset(adapter);
+	}
+	return ret_val;
+}
+
+static void atl1_get_drvinfo(struct net_device *netdev,
+	struct ethtool_drvinfo *drvinfo)
+{
+	struct atl1_adapter *adapter = netdev_priv(netdev);
+
+	strncpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
+	strncpy(drvinfo->version, ATLX_DRIVER_VERSION,
+		sizeof(drvinfo->version));
+	strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+	strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
+		sizeof(drvinfo->bus_info));
+	drvinfo->eedump_len = ATL1_EEDUMP_LEN;
+}
+
+static void atl1_get_wol(struct net_device *netdev,
+	struct ethtool_wolinfo *wol)
+{
+	struct atl1_adapter *adapter = netdev_priv(netdev);
+
+	wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
+	wol->wolopts = 0;
+	if (adapter->wol & ATLX_WUFC_EX)
+		wol->wolopts |= WAKE_UCAST;
+	if (adapter->wol & ATLX_WUFC_MC)
+		wol->wolopts |= WAKE_MCAST;
+	if (adapter->wol & ATLX_WUFC_BC)
+		wol->wolopts |= WAKE_BCAST;
+	if (adapter->wol & ATLX_WUFC_MAG)
+		wol->wolopts |= WAKE_MAGIC;
+	return;
+}
+
+static int atl1_set_wol(struct net_device *netdev,
+	struct ethtool_wolinfo *wol)
+{
+	struct atl1_adapter *adapter = netdev_priv(netdev);
+
+	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+		return -EOPNOTSUPP;
+	adapter->wol = 0;
+	if (wol->wolopts & WAKE_UCAST)
+		adapter->wol |= ATLX_WUFC_EX;
+	if (wol->wolopts & WAKE_MCAST)
+		adapter->wol |= ATLX_WUFC_MC;
+	if (wol->wolopts & WAKE_BCAST)
+		adapter->wol |= ATLX_WUFC_BC;
+	if (wol->wolopts & WAKE_MAGIC)
+		adapter->wol |= ATLX_WUFC_MAG;
+	return 0;
+}
+
+static u32 atl1_get_msglevel(struct net_device *netdev)
+{
+	struct atl1_adapter *adapter = netdev_priv(netdev);
+	return adapter->msg_enable;
+}
+
+static void atl1_set_msglevel(struct net_device *netdev, u32 value)
+{
+	struct atl1_adapter *adapter = netdev_priv(netdev);
+	adapter->msg_enable = value;
+}
+
+static int atl1_get_regs_len(struct net_device *netdev)
+{
+	return ATL1_REG_COUNT * sizeof(u32);
+}
+
+static void atl1_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
+	void *p)
+{
+	struct atl1_adapter *adapter = netdev_priv(netdev);
+	struct atl1_hw *hw = &adapter->hw;
+	unsigned int i;
+	u32 *regbuf = p;
+
+	for (i = 0; i < ATL1_REG_COUNT; i++) {
+		/*
+		 * This switch statement avoids reserved regions
+		 * of register space.
+		 */
+		switch (i) {
+		case 6 ... 9:
+		case 14:
+		case 29 ... 31:
+		case 34 ... 63:
+		case 75 ... 127:
+		case 136 ... 1023:
+		case 1027 ... 1087:
+		case 1091 ... 1151:
+		case 1194 ... 1195:
+		case 1200 ... 1201:
+		case 1206 ... 1213:
+		case 1216 ... 1279:
+		case 1290 ... 1311:
+		case 1323 ... 1343:
+		case 1358 ... 1359:
+		case 1368 ... 1375:
+		case 1378 ... 1383:
+		case 1388 ... 1391:
+		case 1393 ... 1395:
+		case 1402 ... 1403:
+		case 1410 ... 1471:
+		case 1522 ... 1535:
+			/* reserved region; don't read it */
+			regbuf[i] = 0;
+			break;
+		default:
+			/* unreserved region */
+			regbuf[i] = ioread32(hw->hw_addr + (i * sizeof(u32)));
+		}
+	}
+}
+
+static void atl1_get_ringparam(struct net_device *netdev,
+	struct ethtool_ringparam *ring)
+{
+	struct atl1_adapter *adapter = netdev_priv(netdev);
+	struct atl1_tpd_ring *txdr = &adapter->tpd_ring;
+	struct atl1_rfd_ring *rxdr = &adapter->rfd_ring;
+
+	ring->rx_max_pending = ATL1_MAX_RFD;
+	ring->tx_max_pending = ATL1_MAX_TPD;
+	ring->rx_mini_max_pending = 0;
+	ring->rx_jumbo_max_pending = 0;
+	ring->rx_pending = rxdr->count;
+	ring->tx_pending = txdr->count;
+	ring->rx_mini_pending = 0;
+	ring->rx_jumbo_pending = 0;
+}
+
+static int atl1_set_ringparam(struct net_device *netdev,
+	struct ethtool_ringparam *ring)
+{
+	struct atl1_adapter *adapter = netdev_priv(netdev);
+	struct atl1_tpd_ring *tpdr = &adapter->tpd_ring;
+	struct atl1_rrd_ring *rrdr = &adapter->rrd_ring;
+	struct atl1_rfd_ring *rfdr = &adapter->rfd_ring;
+
+	struct atl1_tpd_ring tpd_old, tpd_new;
+	struct atl1_rfd_ring rfd_old, rfd_new;
+	struct atl1_rrd_ring rrd_old, rrd_new;
+	struct atl1_ring_header rhdr_old, rhdr_new;
+	int err;
+
+	tpd_old = adapter->tpd_ring;
+	rfd_old = adapter->rfd_ring;
+	rrd_old = adapter->rrd_ring;
+	rhdr_old = adapter->ring_header;
+
+	if (netif_running(adapter->netdev))
+		atl1_down(adapter);
+
+	rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD);
+	rfdr->count = rfdr->count > ATL1_MAX_RFD ? ATL1_MAX_RFD :
+			rfdr->count;
+	rfdr->count = (rfdr->count + 3) & ~3;
+	rrdr->count = rfdr->count;
+
+	tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD);
+	tpdr->count = tpdr->count > ATL1_MAX_TPD ? ATL1_MAX_TPD :
+			tpdr->count;
+	tpdr->count = (tpdr->count + 3) & ~3;
+
+	if (netif_running(adapter->netdev)) {
+		/* try to get new resources before deleting old */
+		err = atl1_setup_ring_resources(adapter);
+		if (err)
+			goto err_setup_ring;
+
+		/*
+		 * save the new, restore the old in order to free it,
+		 * then restore the new back again
+		 */
+
+		rfd_new = adapter->rfd_ring;
+		rrd_new = adapter->rrd_ring;
+		tpd_new = adapter->tpd_ring;
+		rhdr_new = adapter->ring_header;
+		adapter->rfd_ring = rfd_old;
+		adapter->rrd_ring = rrd_old;
+		adapter->tpd_ring = tpd_old;
+		adapter->ring_header = rhdr_old;
+		atl1_free_ring_resources(adapter);
+		adapter->rfd_ring = rfd_new;
+		adapter->rrd_ring = rrd_new;
+		adapter->tpd_ring = tpd_new;
+		adapter->ring_header = rhdr_new;
+
+		err = atl1_up(adapter);
+		if (err)
+			return err;
+	}
+	return 0;
+
+err_setup_ring:
+	adapter->rfd_ring = rfd_old;
+	adapter->rrd_ring = rrd_old;
+	adapter->tpd_ring = tpd_old;
+	adapter->ring_header = rhdr_old;
+	atl1_up(adapter);
+	return err;
+}
+
+static void atl1_get_pauseparam(struct net_device *netdev,
+	struct ethtool_pauseparam *epause)
+{
+	struct atl1_adapter *adapter = netdev_priv(netdev);
+	struct atl1_hw *hw = &adapter->hw;
+
+	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
+	    hw->media_type == MEDIA_TYPE_1000M_FULL) {
+		epause->autoneg = AUTONEG_ENABLE;
+	} else {
+		epause->autoneg = AUTONEG_DISABLE;
+	}
+	epause->rx_pause = 1;
+	epause->tx_pause = 1;
+}
+
+static int atl1_set_pauseparam(struct net_device *netdev,
+	struct ethtool_pauseparam *epause)
+{
+	struct atl1_adapter *adapter = netdev_priv(netdev);
+	struct atl1_hw *hw = &adapter->hw;
+
+	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
+	    hw->media_type == MEDIA_TYPE_1000M_FULL) {
+		epause->autoneg = AUTONEG_ENABLE;
+	} else {
+		epause->autoneg = AUTONEG_DISABLE;
+	}
+
+	epause->rx_pause = 1;
+	epause->tx_pause = 1;
+
+	return 0;
+}
+
+/* FIXME: is this right? -- CHS */
+static u32 atl1_get_rx_csum(struct net_device *netdev)
+{
+	return 1;
+}
+
+static void atl1_get_strings(struct net_device *netdev, u32 stringset,
+	u8 *data)
+{
+	u8 *p = data;
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
+			memcpy(p, atl1_gstrings_stats[i].stat_string,
+				ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+		break;
+	}
+}
+
+static int atl1_nway_reset(struct net_device *netdev)
+{
+	struct atl1_adapter *adapter = netdev_priv(netdev);
+	struct atl1_hw *hw = &adapter->hw;
+
+	if (netif_running(netdev)) {
+		u16 phy_data;
+		atl1_down(adapter);
+
+		if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
+			hw->media_type == MEDIA_TYPE_1000M_FULL) {
+			phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
+		} else {
+			switch (hw->media_type) {
+			case MEDIA_TYPE_100M_FULL:
+				phy_data = MII_CR_FULL_DUPLEX |
+					MII_CR_SPEED_100 | MII_CR_RESET;
+				break;
+			case MEDIA_TYPE_100M_HALF:
+				phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
+				break;
+			case MEDIA_TYPE_10M_FULL:
+				phy_data = MII_CR_FULL_DUPLEX |
+					MII_CR_SPEED_10 | MII_CR_RESET;
+				break;
+			default:
+				/* MEDIA_TYPE_10M_HALF */
+				phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
+			}
+		}
+		atl1_write_phy_reg(hw, MII_BMCR, phy_data);
+		atl1_up(adapter);
+	}
+	return 0;
+}
+
+const struct ethtool_ops atl1_ethtool_ops = {
+	.get_settings		= atl1_get_settings,
+	.set_settings		= atl1_set_settings,
+	.get_drvinfo		= atl1_get_drvinfo,
+	.get_wol		= atl1_get_wol,
+	.set_wol		= atl1_set_wol,
+	.get_msglevel		= atl1_get_msglevel,
+	.set_msglevel		= atl1_set_msglevel,
+	.get_regs_len		= atl1_get_regs_len,
+	.get_regs		= atl1_get_regs,
+	.get_ringparam		= atl1_get_ringparam,
+	.set_ringparam		= atl1_set_ringparam,
+	.get_pauseparam		= atl1_get_pauseparam,
+	.set_pauseparam		= atl1_set_pauseparam,
+	.get_rx_csum		= atl1_get_rx_csum,
+	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
+	.get_link		= ethtool_op_get_link,
+	.set_sg			= ethtool_op_set_sg,
+	.get_strings		= atl1_get_strings,
+	.nway_reset		= atl1_nway_reset,
+	.get_ethtool_stats	= atl1_get_ethtool_stats,
+	.get_sset_count		= atl1_get_sset_count,
+	.set_tso		= ethtool_op_set_tso,
+};
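A recurring change in this patch is that debug output is now gated on the adapter's msg_enable bitmap, initialized with netif_msg_init() in atl1_probe() and exposed through the get/set_msglevel ethtool hooks above, instead of being printed unconditionally. The following is a minimal, self-contained sketch of that pattern; the foo_priv structure, the foo_* function names and the FOO_DEFAULT_MSG mask are hypothetical stand-ins, while netif_msg_init(), the netif_msg_*() helpers and dev_printk() are the standard kernel interfaces the driver uses.

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

/* default categories; others can be enabled later via ethtool -s <if> msglvl */
#define FOO_DEFAULT_MSG (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;		/* -1 selects FOO_DEFAULT_MSG */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Message level bitmap (see netif_msg_* in netdevice.h)");

struct foo_priv {
	struct pci_dev *pdev;
	u32 msg_enable;		/* consulted by the netif_msg_*() helpers */
};

static void foo_init_msglevel(struct foo_priv *priv)
{
	/* combine the module parameter with the driver defaults */
	priv->msg_enable = netif_msg_init(debug, FOO_DEFAULT_MSG);
}

static void foo_report_tx_error(struct foo_priv *priv)
{
	/* print only when the NETIF_MSG_TX_ERR bit is set */
	if (netif_msg_tx_err(priv))
		dev_printk(KERN_DEBUG, &priv->pdev->dev,
			"example tx error report\n");
}

At runtime the same bitmap is what atl1_get_msglevel() and atl1_set_msglevel() read and write, so verbosity can be raised or lowered through ethtool without reloading the module.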

--- /dev/null
+++ b/drivers/net/atlx/atl1.h
@@ -0,0 +1,796 @@
+/*
+ * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
+ * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
+ * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#ifndef ATL1_H
+#define ATL1_H
+
+#include <linux/compiler.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "atlx.h"
+
+#define ATLX_DRIVER_NAME "atl1"
+
+MODULE_DESCRIPTION("Atheros L1 Gigabit Ethernet Driver");
+
+#define atlx_adapter		atl1_adapter
+#define atlx_check_for_link	atl1_check_for_link
+#define atlx_check_link		atl1_check_link
+#define atlx_hash_mc_addr	atl1_hash_mc_addr
+#define atlx_hash_set		atl1_hash_set
+#define atlx_hw			atl1_hw
+#define atlx_mii_ioctl		atl1_mii_ioctl
+#define atlx_read_phy_reg	atl1_read_phy_reg
+#define atlx_set_mac		atl1_set_mac
+#define atlx_set_mac_addr	atl1_set_mac_addr
+
+struct atl1_adapter;
+struct atl1_hw;
+
+/* function prototypes needed by multiple files */
+u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
+void atl1_hash_set(struct atl1_hw *hw, u32 hash_value);
+s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data);
+void atl1_set_mac_addr(struct atl1_hw *hw);
+static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
+	int cmd);
+static u32 atl1_check_link(struct atl1_adapter *adapter);
+
+extern const struct ethtool_ops atl1_ethtool_ops;
+
+/* hardware definitions specific to L1 */
+
+/* Block IDLE Status Register */
+#define IDLE_STATUS_RXMAC			0x1
+#define IDLE_STATUS_TXMAC			0x2
+#define IDLE_STATUS_RXQ				0x4
+#define IDLE_STATUS_TXQ				0x8
+#define IDLE_STATUS_DMAR			0x10
+#define IDLE_STATUS_DMAW			0x20
+#define IDLE_STATUS_SMB				0x40
+#define IDLE_STATUS_CMB				0x80
+
+/* MDIO Control Register */
+#define MDIO_WAIT_TIMES				30
+
+/* MAC Control Register */
+#define MAC_CTRL_TX_PAUSE			0x10000
+#define MAC_CTRL_SCNT				0x20000
+#define MAC_CTRL_SRST_TX			0x40000
+#define MAC_CTRL_TX_SIMURST			0x80000
+#define MAC_CTRL_SPEED_SHIFT			20
+#define MAC_CTRL_SPEED_MASK			0x300000
+#define MAC_CTRL_SPEED_1000			0x2
+#define MAC_CTRL_SPEED_10_100			0x1
+#define MAC_CTRL_DBG_TX_BKPRESURE		0x400000
+#define MAC_CTRL_TX_HUGE			0x800000
+#define MAC_CTRL_RX_CHKSUM_EN			0x1000000
+#define MAC_CTRL_DBG				0x8000000
+
+/* Wake-On-Lan control register */
+#define WOL_CLK_SWITCH_EN			0x8000
+#define WOL_PT5_EN				0x200000
+#define WOL_PT6_EN				0x400000
+#define WOL_PT5_MATCH				0x8000000
+#define WOL_PT6_MATCH				0x10000000
+
+/* WOL Length ( 2 DWORD ) */
+#define REG_WOL_PATTERN_LEN			0x14A4
+#define WOL_PT_LEN_MASK				0x7F
+#define WOL_PT0_LEN_SHIFT			0
+#define WOL_PT1_LEN_SHIFT			8
+#define WOL_PT2_LEN_SHIFT			16
+#define WOL_PT3_LEN_SHIFT			24
+#define WOL_PT4_LEN_SHIFT			0
+#define WOL_PT5_LEN_SHIFT			8
+#define WOL_PT6_LEN_SHIFT			16
+
+/* Internal SRAM Partition Registers, low 32 bits */
+#define REG_SRAM_RFD_LEN			0x1504
+#define REG_SRAM_RRD_ADDR			0x1508
+#define REG_SRAM_RRD_LEN			0x150C
+#define REG_SRAM_TPD_ADDR			0x1510
+#define REG_SRAM_TPD_LEN			0x1514
+#define REG_SRAM_TRD_ADDR			0x1518
+#define REG_SRAM_TRD_LEN			0x151C
+#define REG_SRAM_RXF_ADDR			0x1520
+#define REG_SRAM_RXF_LEN			0x1524
+#define REG_SRAM_TXF_ADDR			0x1528
+#define REG_SRAM_TXF_LEN			0x152C
+#define REG_SRAM_TCPH_PATH_ADDR			0x1530
+#define SRAM_TCPH_ADDR_MASK			0xFFF
+#define SRAM_TCPH_ADDR_SHIFT			0
+#define SRAM_PATH_ADDR_MASK			0xFFF
+#define SRAM_PATH_ADDR_SHIFT			16
+
+/* Load Ptr Register */
+#define REG_LOAD_PTR				0x1534
+
+/* Descriptor Control registers, low 32 bits */
+#define REG_DESC_RFD_ADDR_LO			0x1544
+#define REG_DESC_RRD_ADDR_LO			0x1548
+#define REG_DESC_TPD_ADDR_LO			0x154C
+#define REG_DESC_CMB_ADDR_LO			0x1550
+#define REG_DESC_SMB_ADDR_LO			0x1554
+#define REG_DESC_RFD_RRD_RING_SIZE		0x1558
+#define DESC_RFD_RING_SIZE_MASK			0x7FF
+#define DESC_RFD_RING_SIZE_SHIFT		0
+#define DESC_RRD_RING_SIZE_MASK			0x7FF
+#define DESC_RRD_RING_SIZE_SHIFT		16
+#define REG_DESC_TPD_RING_SIZE			0x155C
+#define DESC_TPD_RING_SIZE_MASK			0x3FF
+#define DESC_TPD_RING_SIZE_SHIFT		0
+
+/* TXQ Control Register */
+#define REG_TXQ_CTRL				0x1580
+#define TXQ_CTRL_TPD_BURST_NUM_SHIFT		0
+#define TXQ_CTRL_TPD_BURST_NUM_MASK		0x1F
+#define TXQ_CTRL_EN				0x20
+#define TXQ_CTRL_ENH_MODE			0x40
+#define TXQ_CTRL_TPD_FETCH_TH_SHIFT		8
+#define TXQ_CTRL_TPD_FETCH_TH_MASK		0x3F
+#define TXQ_CTRL_TXF_BURST_NUM_SHIFT		16
+#define TXQ_CTRL_TXF_BURST_NUM_MASK		0xFFFF
+
+/* Jumbo packet Threshold for task offload */
+#define REG_TX_JUMBO_TASK_TH_TPD_IPG		0x1584
+#define TX_JUMBO_TASK_TH_MASK			0x7FF
+#define TX_JUMBO_TASK_TH_SHIFT			0
+#define TX_TPD_MIN_IPG_MASK			0x1F
+#define TX_TPD_MIN_IPG_SHIFT			16
+
+/* RXQ Control Register */
+#define REG_RXQ_CTRL				0x15A0
+#define RXQ_CTRL_RFD_BURST_NUM_SHIFT		0
+#define RXQ_CTRL_RFD_BURST_NUM_MASK		0xFF
+#define RXQ_CTRL_RRD_BURST_THRESH_SHIFT		8
+#define RXQ_CTRL_RRD_BURST_THRESH_MASK		0xFF
+#define RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT		16
+#define RXQ_CTRL_RFD_PREF_MIN_IPG_MASK		0x1F
+#define RXQ_CTRL_CUT_THRU_EN			0x40000000
+#define RXQ_CTRL_EN				0x80000000
+
+/* Rx jumbo packet threshold and rrd  retirement timer */
+#define REG_RXQ_JMBOSZ_RRDTIM			0x15A4
+#define RXQ_JMBOSZ_TH_MASK			0x7FF
+#define RXQ_JMBOSZ_TH_SHIFT			0
+#define RXQ_JMBO_LKAH_MASK			0xF
+#define RXQ_JMBO_LKAH_SHIFT			11
+#define RXQ_RRD_TIMER_MASK			0xFFFF
+#define RXQ_RRD_TIMER_SHIFT			16
+
+/* RFD flow control register */
+#define REG_RXQ_RXF_PAUSE_THRESH		0x15A8
+#define RXQ_RXF_PAUSE_TH_HI_SHIFT		16
+#define RXQ_RXF_PAUSE_TH_HI_MASK		0xFFF
+#define RXQ_RXF_PAUSE_TH_LO_SHIFT		0
+#define RXQ_RXF_PAUSE_TH_LO_MASK		0xFFF
+
+/* RRD flow control register */
+#define REG_RXQ_RRD_PAUSE_THRESH		0x15AC
+#define RXQ_RRD_PAUSE_TH_HI_SHIFT		0
+#define RXQ_RRD_PAUSE_TH_HI_MASK		0xFFF
+#define RXQ_RRD_PAUSE_TH_LO_SHIFT		16
+#define RXQ_RRD_PAUSE_TH_LO_MASK		0xFFF
+
+/* DMA Engine Control Register */
+#define REG_DMA_CTRL				0x15C0
+#define DMA_CTRL_DMAR_IN_ORDER			0x1
+#define DMA_CTRL_DMAR_ENH_ORDER			0x2
+#define DMA_CTRL_DMAR_OUT_ORDER			0x4
+#define DMA_CTRL_RCB_VALUE			0x8
+#define DMA_CTRL_DMAR_BURST_LEN_SHIFT		4
+#define DMA_CTRL_DMAR_BURST_LEN_MASK		7
+#define DMA_CTRL_DMAW_BURST_LEN_SHIFT		7
+#define DMA_CTRL_DMAW_BURST_LEN_MASK		7
+#define DMA_CTRL_DMAR_EN			0x400
+#define DMA_CTRL_DMAW_EN			0x800
+
+/* CMB/SMB Control Register */
+#define REG_CSMB_CTRL				0x15D0
+#define CSMB_CTRL_CMB_NOW			1
+#define CSMB_CTRL_SMB_NOW			2
+#define CSMB_CTRL_CMB_EN			4
+#define CSMB_CTRL_SMB_EN			8
+
+/* CMB DMA Write Threshold Register */
+#define REG_CMB_WRITE_TH			0x15D4
+#define CMB_RRD_TH_SHIFT			0
+#define CMB_RRD_TH_MASK				0x7FF
+#define CMB_TPD_TH_SHIFT			16
+#define CMB_TPD_TH_MASK				0x7FF
+
+/* RX/TX count-down timer to trigger CMB-write. 2us resolution. */
+#define REG_CMB_WRITE_TIMER			0x15D8
+#define CMB_RX_TM_SHIFT				0
+#define CMB_RX_TM_MASK				0xFFFF
+#define CMB_TX_TM_SHIFT				16
+#define CMB_TX_TM_MASK				0xFFFF
+
+/* Number of packet received since last CMB write */
+#define REG_CMB_RX_PKT_CNT			0x15DC
+
+/* Number of packet transmitted since last CMB write */
+#define REG_CMB_TX_PKT_CNT			0x15E0
+
+/* SMB auto DMA timer register */
+#define REG_SMB_TIMER				0x15E4
+
+/* Mailbox Register */
+#define REG_MAILBOX				0x15F0
+#define MB_RFD_PROD_INDX_SHIFT			0
+#define MB_RFD_PROD_INDX_MASK			0x7FF
+#define MB_RRD_CONS_INDX_SHIFT			11
+#define MB_RRD_CONS_INDX_MASK			0x7FF
+#define MB_TPD_PROD_INDX_SHIFT			22
+#define MB_TPD_PROD_INDX_MASK			0x3FF
+
+/* Interrupt Status Register */
+#define ISR_SMB					0x1
+#define ISR_TIMER				0x2
+#define ISR_MANUAL				0x4
+#define ISR_RXF_OV				0x8
+#define ISR_RFD_UNRUN				0x10
+#define ISR_RRD_OV				0x20
+#define ISR_TXF_UNRUN				0x40
+#define ISR_LINK				0x80
+#define ISR_HOST_RFD_UNRUN			0x100
+#define ISR_HOST_RRD_OV				0x200
+#define ISR_DMAR_TO_RST				0x400
+#define ISR_DMAW_TO_RST				0x800
+#define ISR_GPHY				0x1000
+#define ISR_RX_PKT				0x10000
+#define ISR_TX_PKT				0x20000
+#define ISR_TX_DMA				0x40000
+#define ISR_RX_DMA				0x80000
+#define ISR_CMB_RX				0x100000
+#define ISR_CMB_TX				0x200000
+#define ISR_MAC_RX				0x400000
+#define ISR_MAC_TX				0x800000
+#define ISR_DIS_SMB				0x20000000
+#define ISR_DIS_DMA				0x40000000
+
+/* Normal Interrupt mask  */
+#define IMR_NORMAL_MASK	(\
+	ISR_SMB		|\
+	ISR_GPHY	|\
+	ISR_PHY_LINKDOWN|\
+	ISR_DMAR_TO_RST	|\
+	ISR_DMAW_TO_RST	|\
+	ISR_CMB_TX	|\
+	ISR_CMB_RX)
+
+/* Debug Interrupt Mask  (enable all interrupt) */
+#define IMR_DEBUG_MASK	(\
+	ISR_SMB		|\
+	ISR_TIMER	|\
+	ISR_MANUAL	|\
+	ISR_RXF_OV	|\
+	ISR_RFD_UNRUN	|\
+	ISR_RRD_OV	|\
+	ISR_TXF_UNRUN	|\
+	ISR_LINK	|\
+	ISR_CMB_TX	|\
+	ISR_CMB_RX	|\
+	ISR_RX_PKT	|\
+	ISR_TX_PKT	|\
+	ISR_MAC_RX	|\
+	ISR_MAC_TX)
+
+#define MEDIA_TYPE_1000M_FULL			1
+#define MEDIA_TYPE_100M_FULL			2
+#define MEDIA_TYPE_100M_HALF			3
+#define MEDIA_TYPE_10M_FULL			4
+#define MEDIA_TYPE_10M_HALF			5
+
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT		0x002F	/* All but 1000-Half */
+
+#define MAX_JUMBO_FRAME_SIZE			10240
+
+#define ATL1_EEDUMP_LEN				48
+
+/* Statistics counters collected by the MAC */
+struct stats_msg_block {
+	/* rx */
+	u32 rx_ok;		/* good RX packets */
+	u32 rx_bcast;		/* good RX broadcast packets */
+	u32 rx_mcast;		/* good RX multicast packets */
+	u32 rx_pause;		/* RX pause frames */
+	u32 rx_ctrl;		/* RX control packets other than pause frames */
+	u32 rx_fcs_err;		/* RX packets with bad FCS */
+	u32 rx_len_err;		/* RX packets with length != actual size */
+	u32 rx_byte_cnt;	/* good bytes received. FCS is NOT included */
+	u32 rx_runt;		/* RX packets < 64 bytes with good FCS */
+	u32 rx_frag;		/* RX packets < 64 bytes with bad FCS */
+	u32 rx_sz_64;		/* 64 byte RX packets */
+	u32 rx_sz_65_127;
+	u32 rx_sz_128_255;
+	u32 rx_sz_256_511;
+	u32 rx_sz_512_1023;
+	u32 rx_sz_1024_1518;
+	u32 rx_sz_1519_max;	/* 1519 byte to MTU RX packets */
+	u32 rx_sz_ov;		/* truncated RX packets > MTU */
+	u32 rx_rxf_ov;		/* frames dropped due to RX FIFO overflow */
+	u32 rx_rrd_ov;		/* frames dropped due to RRD overflow */
+	u32 rx_align_err;	/* alignment errors */
+	u32 rx_bcast_byte_cnt;	/* RX broadcast bytes, excluding FCS */
+	u32 rx_mcast_byte_cnt;	/* RX multicast bytes, excluding FCS */
+	u32 rx_err_addr;	/* packets dropped due to address filtering */
+
+	/* tx */
+	u32 tx_ok;		/* good TX packets */
+	u32 tx_bcast;		/* good TX broadcast packets */
+	u32 tx_mcast;		/* good TX multicast packets */
+	u32 tx_pause;		/* TX pause frames */
+	u32 tx_exc_defer;	/* TX packets deferred excessively */
+	u32 tx_ctrl;		/* TX control frames, excluding pause frames */
+	u32 tx_defer;		/* TX packets deferred */
+	u32 tx_byte_cnt;	/* bytes transmitted, FCS is NOT included */
+	u32 tx_sz_64;		/* 64 byte TX packets */
+	u32 tx_sz_65_127;
+	u32 tx_sz_128_255;
+	u32 tx_sz_256_511;
+	u32 tx_sz_512_1023;
+	u32 tx_sz_1024_1518;
+	u32 tx_sz_1519_max;	/* 1519 byte to MTU TX packets */
+	u32 tx_1_col;		/* packets TX after a single collision */
+	u32 tx_2_col;		/* packets TX after multiple collisions */
+	u32 tx_late_col;	/* TX packets with late collisions */
+	u32 tx_abort_col;	/* TX packets aborted w/excessive collisions */
+	u32 tx_underrun;	/* TX packets aborted due to TX FIFO underrun
+				 * or TRD FIFO underrun */
+	u32 tx_rd_eop;		/* reads beyond the EOP into the next frame
+				 * when TRD was not written timely */
+	u32 tx_len_err;		/* TX packets where length != actual size */
+	u32 tx_trunc;		/* TX packets truncated due to size > MTU */
+	u32 tx_bcast_byte;	/* broadcast bytes transmitted, excluding FCS */
+	u32 tx_mcast_byte;	/* multicast bytes transmitted, excluding FCS */
+	u32 smb_updated;	/* 1: SMB Updated. This is used by software to
+				 * indicate the statistics update. Software
+				 * should clear this bit after retrieving the
+				 * statistics information. */
+};
+
+/* Coalescing Message Block */
+struct coals_msg_block {
+	u32 int_stats;		/* interrupt status */
+	u16 rrd_prod_idx;	/* RRD Producer Index. */
+	u16 rfd_cons_idx;	/* RFD Consumer Index. */
+	u16 update;		/* Selene sets this bit every time it DMAs the
+				 * CMB to host memory. Software should clear
+				 * this bit when CMB info is processed. */
+	u16 tpd_cons_idx;	/* TPD Consumer Index. */
+};
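A minimal sketch of the consumption pattern implied by the update field's comment above; example_cmb_ready() is a hypothetical helper, not part of the driver:

static inline int example_cmb_ready(struct coals_msg_block *cmb)
{
	if (!cmb->update)
		return 0;	/* no fresh CMB DMAed by the NIC yet */
	/* ... consume rrd_prod_idx, rfd_cons_idx and tpd_cons_idx here ... */
	cmb->update = 0;	/* clear once the CMB contents are processed */
	return 1;
}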
+
+/* RRD descriptor */
+struct rx_return_desc {
+	u8 num_buf;	/* Number of RFD buffers used by the received packet */
+	u8 resved;
+	u16 buf_indx;	/* RFD Index of the first buffer */
+	union {
+		u32 valid;
+		struct {
+			u16 rx_chksum;
+			u16 pkt_size;
+		} xsum_sz;
+	} xsz;
+
+	u16 pkt_flg;	/* Packet flags */
+	u16 err_flg;	/* Error flags */
+	u16 resved2;
+	u16 vlan_tag;	/* VLAN TAG */
+};
+
+#define PACKET_FLAG_ETH_TYPE	0x0080
+#define PACKET_FLAG_VLAN_INS	0x0100
+#define PACKET_FLAG_ERR		0x0200
+#define PACKET_FLAG_IPV4	0x0400
+#define PACKET_FLAG_UDP		0x0800
+#define PACKET_FLAG_TCP		0x1000
+#define PACKET_FLAG_BCAST	0x2000
+#define PACKET_FLAG_MCAST	0x4000
+#define PACKET_FLAG_PAUSE	0x8000
+
+#define ERR_FLAG_CRC		0x0001
+#define ERR_FLAG_CODE		0x0002
+#define ERR_FLAG_DRIBBLE	0x0004
+#define ERR_FLAG_RUNT		0x0008
+#define ERR_FLAG_OV		0x0010
+#define ERR_FLAG_TRUNC		0x0020
+#define ERR_FLAG_IP_CHKSUM	0x0040
+#define ERR_FLAG_L4_CHKSUM	0x0080
+#define ERR_FLAG_LEN		0x0100
+#define ERR_FLAG_DES_ADDR	0x0200
+
+/* RFD descriptor */
+struct rx_free_desc {
+	__le64 buffer_addr;	/* Address of the descriptor's data buffer */
+	__le16 buf_len;		/* Size of the receive buffer in host memory */
+	u16 coalese;		/* Update consumer index to host after the
+				 * reception of this frame */
+	/* __attribute__ ((packed)) is required */
+} __attribute__ ((packed));
+
+/*
+ * The L1 transmit packet descriptor is comprised of four 32-bit words.
+ *
+ *	31					0
+ *	+---------------------------------------+
+ *      |	Word 0: Buffer addr lo 		|
+ *      +---------------------------------------+
+ *      |	Word 1: Buffer addr hi		|
+ *      +---------------------------------------+
+ *      |		Word 2			|
+ *      +---------------------------------------+
+ *      |		Word 3			|
+ *      +---------------------------------------+
+ *
+ * Words 0 and 1 combine to form a 64-bit buffer address.
+ *
+ * Word 2 is self explanatory in the #define block below.
+ *
+ * Word 3 has two forms, depending upon the state of bits 3 and 4.
+ * If bits 3 and 4 are both zero, then bits 14:31 are unused by the
+ * hardware.  Otherwise, if either bit 3 or 4 is set, the definition
+ * of bits 14:31 varies according to the following depiction.
+ *
+ *	0	End of packet			0	End of packet
+ *	1	Coalesce			1	Coalesce
+ *	2	Insert VLAN tag			2	Insert VLAN tag
+ *	3	Custom csum enable = 0		3	Custom csum enable = 1
+ *	4	Segment enable = 1		4	Segment enable = 0
+ *	5	Generate IP checksum		5	Generate IP checksum
+ *	6	Generate TCP checksum		6	Generate TCP checksum
+ *	7	Generate UDP checksum		7	Generate UDP checksum
+ *	8	VLAN tagged			8	VLAN tagged
+ *	9	Ethernet frame type		9	Ethernet frame type
+ *	10-+ 					10-+
+ *	11 |	IP hdr length (10:13)		11 |	IP hdr length (10:13)
+ *	12 |	(num 32-bit words)		12 |	(num 32-bit words)
+ *	13-+					13-+
+ *	14-+					14	Unused
+ *	15 |	TCP hdr length (14:17)		15	Unused
+ *	16 |	(num 32-bit words)		16-+
+ *	17-+					17 |
+ *	18	Header TPD flag			18 |
+ *	19-+					19 |	Payload offset
+ *	20 |					20 |	    (16:23)
+ *	21 |					21 |
+ *	22 |					22 |
+ *	23 |					23-+
+ *	24 |					24-+
+ *	25 |	MSS (19:31)			25 |
+ *	26 |					26 |
+ *	27 |					27 |	Custom csum offset
+ *	28 |					28 |	     (24:31)
+ *	29 |					29 |
+ *	30 |					30 |
+ *	31-+					31-+
+ */
+
+/* tpd word 2 */
+#define TPD_BUFLEN_MASK		0x3FFF
+#define TPD_BUFLEN_SHIFT	0
+#define TPD_DMAINT_MASK		0x0001
+#define TPD_DMAINT_SHIFT	14
+#define TPD_PKTNT_MASK		0x0001
+#define TPD_PKTINT_SHIFT	15
+#define TPD_VLANTAG_MASK	0xFFFF
+#define TPD_VLAN_SHIFT		16
+
+/* tpd word 3 bits 0:13 */
+#define TPD_EOP_MASK		0x0001
+#define TPD_EOP_SHIFT		0
+#define TPD_COALESCE_MASK	0x0001
+#define TPD_COALESCE_SHIFT	1
+#define TPD_INS_VL_TAG_MASK	0x0001
+#define TPD_INS_VL_TAG_SHIFT	2
+#define TPD_CUST_CSUM_EN_MASK	0x0001
+#define TPD_CUST_CSUM_EN_SHIFT	3
+#define TPD_SEGMENT_EN_MASK	0x0001
+#define TPD_SEGMENT_EN_SHIFT	4
+#define TPD_IP_CSUM_MASK	0x0001
+#define TPD_IP_CSUM_SHIFT	5
+#define TPD_TCP_CSUM_MASK	0x0001
+#define TPD_TCP_CSUM_SHIFT	6
+#define TPD_UDP_CSUM_MASK	0x0001
+#define TPD_UDP_CSUM_SHIFT	7
+#define TPD_VL_TAGGED_MASK	0x0001
+#define TPD_VL_TAGGED_SHIFT	8
+#define TPD_ETHTYPE_MASK	0x0001
+#define TPD_ETHTYPE_SHIFT	9
+#define TPD_IPHL_MASK		0x000F
+#define TPD_IPHL_SHIFT		10
+
+/* tpd word 3 bits 14:31 if segment enabled */
+#define TPD_TCPHDRLEN_MASK	0x000F
+#define TPD_TCPHDRLEN_SHIFT	14
+#define TPD_HDRFLAG_MASK	0x0001
+#define TPD_HDRFLAG_SHIFT	18
+#define TPD_MSS_MASK		0x1FFF
+#define TPD_MSS_SHIFT		19
+
+/* tpd word 3 bits 16:31 if custom csum enabled */
+#define TPD_PLOADOFFSET_MASK	0x00FF
+#define TPD_PLOADOFFSET_SHIFT	16
+#define TPD_CCSUMOFFSET_MASK	0x00FF
+#define TPD_CCSUMOFFSET_SHIFT	24
+
+struct tx_packet_desc {
+	__le64 buffer_addr;
+	__le32 word2;
+	__le32 word3;
+};
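As an illustration of the word layout documented above (a sketch, not driver code), hypothetical helpers could pack word 2 and an end-of-packet word 3 from the masks and shifts; since the struct fields are __le32, real values would be converted with cpu_to_le32 before being written to the descriptor:

static inline u32 example_tpd_word2(u16 buf_len, u16 vlan_tag)
{
	return ((buf_len & TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT) |
	       ((u32)(vlan_tag & TPD_VLANTAG_MASK) << TPD_VLAN_SHIFT);
}

static inline u32 example_tpd_word3_eop(void)
{
	/* last buffer of the packet, no segmentation, no custom csum */
	return (1 & TPD_EOP_MASK) << TPD_EOP_SHIFT;
}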
+
+/* DMA Order Settings */
+enum atl1_dma_order {
+	atl1_dma_ord_in = 1,
+	atl1_dma_ord_enh = 2,
+	atl1_dma_ord_out = 4
+};
+
+enum atl1_dma_rcb {
+	atl1_rcb_64 = 0,
+	atl1_rcb_128 = 1
+};
+
+enum atl1_dma_req_block {
+	atl1_dma_req_128 = 0,
+	atl1_dma_req_256 = 1,
+	atl1_dma_req_512 = 2,
+	atl1_dma_req_1024 = 3,
+	atl1_dma_req_2048 = 4,
+	atl1_dma_req_4096 = 5
+};
+
+#define ATL1_MAX_INTR		3
+#define ATL1_MAX_TX_BUF_LEN	0x3000	/* 12288 bytes */
+
+#define ATL1_DEFAULT_TPD	256
+#define ATL1_MAX_TPD		1024
+#define ATL1_MIN_TPD		64
+#define ATL1_DEFAULT_RFD	512
+#define ATL1_MIN_RFD		128
+#define ATL1_MAX_RFD		2048
+#define ATL1_REG_COUNT		1538
+
+#define ATL1_GET_DESC(R, i, type)	(&(((type *)((R)->desc))[i]))
+#define ATL1_RFD_DESC(R, i)	ATL1_GET_DESC(R, i, struct rx_free_desc)
+#define ATL1_TPD_DESC(R, i)	ATL1_GET_DESC(R, i, struct tx_packet_desc)
+#define ATL1_RRD_DESC(R, i)	ATL1_GET_DESC(R, i, struct rx_return_desc)
+
+/*
+ * atl1_ring_header represents a single, contiguous block of DMA space
+ * mapped for the three descriptor rings (tpd, rfd, rrd) and the two
+ * message blocks (cmb, smb) described below
+ */
+struct atl1_ring_header {
+	void *desc;		/* virtual address */
+	dma_addr_t dma;		/* physical address*/
+	unsigned int size;	/* length in bytes */
+};
+
+/*
+ * atl1_buffer is a wrapper around a pointer to a socket buffer
+ * so a DMA handle can be stored along with the skb
+ */
+struct atl1_buffer {
+	struct sk_buff *skb;	/* socket buffer */
+	u16 length;		/* rx buffer length */
+	u16 alloced;		/* 1 if skb allocated */
+	dma_addr_t dma;
+};
+
+/* transmit packet descriptor (tpd) ring */
+struct atl1_tpd_ring {
+	void *desc;		/* descriptor ring virtual address */
+	dma_addr_t dma;		/* descriptor ring physical address */
+	u16 size;		/* descriptor ring length in bytes */
+	u16 count;		/* number of descriptors in the ring */
+	u16 hw_idx;		/* hardware index */
+	atomic_t next_to_clean;
+	atomic_t next_to_use;
+	struct atl1_buffer *buffer_info;
+};
+
+/* receive free descriptor (rfd) ring */
+struct atl1_rfd_ring {
+	void *desc;		/* descriptor ring virtual address */
+	dma_addr_t dma;		/* descriptor ring physical address */
+	u16 size;		/* descriptor ring length in bytes */
+	u16 count;		/* number of descriptors in the ring */
+	atomic_t next_to_use;
+	u16 next_to_clean;
+	struct atl1_buffer *buffer_info;
+};
+
+/* receive return descriptor (rrd) ring */
+struct atl1_rrd_ring {
+	void *desc;		/* descriptor ring virtual address */
+	dma_addr_t dma;		/* descriptor ring physical address */
+	unsigned int size;	/* descriptor ring length in bytes */
+	u16 count;		/* number of descriptors in the ring */
+	u16 next_to_use;
+	atomic_t next_to_clean;
+};
+
+/* coalescing message block (cmb) */
+struct atl1_cmb {
+	struct coals_msg_block *cmb;
+	dma_addr_t dma;
+};
+
+/* statistics message block (smb) */
+struct atl1_smb {
+	struct stats_msg_block *smb;
+	dma_addr_t dma;
+};
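To illustrate the single-allocation layout described by atl1_ring_header above, a hypothetical sizing helper could sum the three rings and the two message blocks (any alignment padding the driver adds is omitted here):

static inline unsigned int example_ring_header_size(u16 tpd_count,
	u16 rfd_count, u16 rrd_count)
{
	return tpd_count * sizeof(struct tx_packet_desc) +
	       rfd_count * sizeof(struct rx_free_desc) +
	       rrd_count * sizeof(struct rx_return_desc) +
	       sizeof(struct coals_msg_block) +
	       sizeof(struct stats_msg_block);
}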
+
+/* Statistics counters */
+struct atl1_sft_stats {
+	u64 rx_packets;
+	u64 tx_packets;
+	u64 rx_bytes;
+	u64 tx_bytes;
+	u64 multicast;
+	u64 collisions;
+	u64 rx_errors;
+	u64 rx_length_errors;
+	u64 rx_crc_errors;
+	u64 rx_frame_errors;
+	u64 rx_fifo_errors;
+	u64 rx_missed_errors;
+	u64 tx_errors;
+	u64 tx_fifo_errors;
+	u64 tx_aborted_errors;
+	u64 tx_window_errors;
+	u64 tx_carrier_errors;
+	u64 tx_pause;		/* TX pause frames */
+	u64 excecol;		/* TX packets w/ excessive collisions */
+	u64 deffer;		/* TX packets deferred */
+	u64 scc;		/* packets TX after a single collision */
+	u64 mcc;		/* packets TX after multiple collisions */
+	u64 latecol;		/* TX packets w/ late collisions */
+	u64 tx_underun;		/* TX packets aborted due to TX FIFO underrun
+				 * or TRD FIFO underrun */
+	u64 tx_trunc;		/* TX packets truncated due to size > MTU */
+	u64 rx_pause;		/* num Pause packets received. */
+	u64 rx_rrd_ov;
+	u64 rx_trunc;
+};
+
+/* hardware structure */
+struct atl1_hw {
+	u8 __iomem *hw_addr;
+	struct atl1_adapter *back;
+	enum atl1_dma_order dma_ord;
+	enum atl1_dma_rcb rcb_value;
+	enum atl1_dma_req_block dmar_block;
+	enum atl1_dma_req_block dmaw_block;
+	u8 preamble_len;
+	u8 max_retry;
+	u8 jam_ipg;		/* IPG to start JAM for collision based flow
+				 * control in half-duplex mode. In units of
+				 * 8-bit time */
+	u8 ipgt;		/* Desired back to back inter-packet gap.
+				 * The default is 96-bit time */
+	u8 min_ifg;		/* Minimum number of IFGs to enforce between
+				 * receive frames. A frame whose gap is below
+				 * this IFG is dropped */
+	u8 ipgr1;		/* 64-bit Carrier-Sense window */
+	u8 ipgr2;		/* 96-bit IPG window */
+	u8 tpd_burst;		/* Number of TPD to prefetch in cache-aligned
+				 * burst. Each TPD is 16 bytes long */
+	u8 rfd_burst;		/* Number of RFD to prefetch in cache-aligned
+				 * burst. Each RFD is 12 bytes long */
+	u8 rfd_fetch_gap;
+	u8 rrd_burst;		/* Threshold number of RRDs that can be retired
+				 * in a burst. Each RRD is 16 bytes long */
+	u8 tpd_fetch_th;
+	u8 tpd_fetch_gap;
+	u16 tx_jumbo_task_th;
+	u16 txf_burst;		/* Number of data bytes to read in a cache-
+				 * aligned burst. Each SRAM entry is 8 bytes */
+	u16 rx_jumbo_th;	/* Jumbo packet size for non-VLAN packet. VLAN
+				 * packets should add 4 bytes */
+	u16 rx_jumbo_lkah;
+	u16 rrd_ret_timer;	/* RRD retirement timer. Decremented by 1
+				 * every 512 ns. */
+	u16 lcol;		/* Collision Window */
+
+	u16 cmb_tpd;
+	u16 cmb_rrd;
+	u16 cmb_rx_timer;
+	u16 cmb_tx_timer;
+	u32 smb_timer;
+	u16 media_type;
+	u16 autoneg_advertised;
+
+	u16 mii_autoneg_adv_reg;
+	u16 mii_1000t_ctrl_reg;
+
+	u32 max_frame_size;
+	u32 min_frame_size;
+
+	u16 dev_rev;
+
+	/* spi flash */
+	u8 flash_vendor;
+
+	u8 mac_addr[ETH_ALEN];
+	u8 perm_mac_addr[ETH_ALEN];
+
+	bool phy_configured;
+};
+
+struct atl1_adapter {
+	struct net_device *netdev;
+	struct pci_dev *pdev;
+	struct net_device_stats net_stats;
+	struct atl1_sft_stats soft_stats;
+	struct vlan_group *vlgrp;
+	u32 rx_buffer_len;
+	u32 wol;
+	u16 link_speed;
+	u16 link_duplex;
+	spinlock_t lock;
+	struct work_struct tx_timeout_task;
+	struct work_struct link_chg_task;
+	struct work_struct pcie_dma_to_rst_task;
+	struct timer_list watchdog_timer;
+	struct timer_list phy_config_timer;
+	bool phy_timer_pending;
+
+	/* all descriptor rings' memory */
+	struct atl1_ring_header ring_header;
+
+	/* TX */
+	struct atl1_tpd_ring tpd_ring;
+	spinlock_t mb_lock;
+
+	/* RX */
+	struct atl1_rfd_ring rfd_ring;
+	struct atl1_rrd_ring rrd_ring;
+	u64 hw_csum_err;
+	u64 hw_csum_good;
+	u32 msg_enable;
+	u16 imt;		/* interrupt moderator timer (2us resolution) */
+	u16 ict;		/* interrupt clear timer (2us resolution) */
+	struct mii_if_info mii;	/* MII interface info */
+
+	u32 bd_number;		/* board number */
+	bool pci_using_64;
+	struct atl1_hw hw;
+	struct atl1_smb smb;
+	struct atl1_cmb cmb;
+};
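Purely as an illustration of the ATL1_*_DESC accessor macros defined earlier (a hypothetical helper, not part of the driver), resolving TPD ring slot i to a typed descriptor pointer looks like:

static inline struct tx_packet_desc *
example_tpd_slot(struct atl1_adapter *adapter, u16 i)
{
	return ATL1_TPD_DESC(&adapter->tpd_ring, i);
}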
+
+#endif /* ATL1_H */

+ 433 - 0
drivers/net/atlx/atlx.c

@@ -0,0 +1,433 @@
+/* atlx.c -- common functions for Attansic network drivers
+ *
+ * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
+ * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
+ * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
+ * Copyright(c) 2007 Atheros Corporation. All rights reserved.
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+/* Including this file like a header is a temporary hack, I promise. -- CHS */
+#ifndef ATLX_C
+#define ATLX_C
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/if.h>
+#include <linux/netdevice.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "atlx.h"
+
+static struct atlx_spi_flash_dev flash_table[] = {
+/*	MFR_NAME  WRSR  READ  PRGM  WREN  WRDI  RDSR  RDID  SEC_ERS CHIP_ERS */
+	{"Atmel", 0x00, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52,   0x62},
+	{"SST",   0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0x90, 0x20,   0x60},
+	{"ST",    0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0xAB, 0xD8,   0xC7},
+};
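The table above is indexed by the flash_vendor value validated later in this file (0 = Atmel, 1 = SST, 2 = ST); a hypothetical lookup helper, not present in the driver, would be:

static inline const struct atlx_spi_flash_dev *example_flash_ops(u8 vendor)
{
	/* callers must keep vendor within FLASH_VENDOR_MIN..FLASH_VENDOR_MAX */
	return &flash_table[vendor];
}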
+
+static int atlx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+	switch (cmd) {
+	case SIOCGMIIPHY:
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		return atlx_mii_ioctl(netdev, ifr, cmd);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+/*
+ * atlx_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int atlx_set_mac(struct net_device *netdev, void *p)
+{
+	struct atlx_adapter *adapter = netdev_priv(netdev);
+	struct sockaddr *addr = p;
+
+	if (netif_running(netdev))
+		return -EBUSY;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+	memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
+
+	atlx_set_mac_addr(&adapter->hw);
+	return 0;
+}
+
+static void atlx_check_for_link(struct atlx_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	u16 phy_data = 0;
+
+	spin_lock(&adapter->lock);
+	adapter->phy_timer_pending = false;
+	atlx_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
+	atlx_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
+	spin_unlock(&adapter->lock);
+
+	/* notify upper layer link down ASAP */
+	if (!(phy_data & BMSR_LSTATUS)) {
+		/* Link Down */
+		if (netif_carrier_ok(netdev)) {
+			/* old link state: Up */
+			dev_info(&adapter->pdev->dev, "%s link is down\n",
+				netdev->name);
+			adapter->link_speed = SPEED_0;
+			netif_carrier_off(netdev);
+			netif_stop_queue(netdev);
+		}
+	}
+	schedule_work(&adapter->link_chg_task);
+}
+
+/*
+ * atlx_set_multi - Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_multi entry point is called whenever the multicast address
+ * list or the network interface flags are updated.  This routine is
+ * responsible for configuring the hardware for proper multicast,
+ * promiscuous mode, and all-multi behavior.
+ */
+static void atlx_set_multi(struct net_device *netdev)
+{
+	struct atlx_adapter *adapter = netdev_priv(netdev);
+	struct atlx_hw *hw = &adapter->hw;
+	struct dev_mc_list *mc_ptr;
+	u32 rctl;
+	u32 hash_value;
+
+	/* Check for Promiscuous and All Multicast modes */
+	rctl = ioread32(hw->hw_addr + REG_MAC_CTRL);
+	if (netdev->flags & IFF_PROMISC)
+		rctl |= MAC_CTRL_PROMIS_EN;
+	else if (netdev->flags & IFF_ALLMULTI) {
+		rctl |= MAC_CTRL_MC_ALL_EN;
+		rctl &= ~MAC_CTRL_PROMIS_EN;
+	} else
+		rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
+
+	iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL);
+
+	/* clear the old settings from the multicast hash table */
+	iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
+	iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
+
+	/* hash each mc address and set it in the multicast hash table */
+	for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
+		hash_value = atlx_hash_mc_addr(hw, mc_ptr->dmi_addr);
+		atlx_hash_set(hw, hash_value);
+	}
+}
+
+/*
+ * atlx_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ */
+static void atlx_irq_enable(struct atlx_adapter *adapter)
+{
+	iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR);
+	ioread32(adapter->hw.hw_addr + REG_IMR);
+}
+
+/*
+ * atlx_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ */
+static void atlx_irq_disable(struct atlx_adapter *adapter)
+{
+	iowrite32(0, adapter->hw.hw_addr + REG_IMR);
+	ioread32(adapter->hw.hw_addr + REG_IMR);
+	synchronize_irq(adapter->pdev->irq);
+}
+
+static void atlx_clear_phy_int(struct atlx_adapter *adapter)
+{
+	u16 phy_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&adapter->lock, flags);
+	atlx_read_phy_reg(&adapter->hw, 19, &phy_data);
+	spin_unlock_irqrestore(&adapter->lock, flags);
+}
+
+/*
+ * atlx_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ */
+static struct net_device_stats *atlx_get_stats(struct net_device *netdev)
+{
+	struct atlx_adapter *adapter = netdev_priv(netdev);
+	return &adapter->net_stats;
+}
+
+/*
+ * atlx_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ */
+static void atlx_tx_timeout(struct net_device *netdev)
+{
+	struct atlx_adapter *adapter = netdev_priv(netdev);
+	/* Do the reset outside of interrupt context */
+	schedule_work(&adapter->tx_timeout_task);
+}
+
+/*
+ * atlx_link_chg_task - handle a link change event outside of interrupt context
+ */
+static void atlx_link_chg_task(struct work_struct *work)
+{
+	struct atlx_adapter *adapter;
+	unsigned long flags;
+
+	adapter = container_of(work, struct atlx_adapter, link_chg_task);
+
+	spin_lock_irqsave(&adapter->lock, flags);
+	atlx_check_link(adapter);
+	spin_unlock_irqrestore(&adapter->lock, flags);
+}
+
+static void atlx_vlan_rx_register(struct net_device *netdev,
+	struct vlan_group *grp)
+{
+	struct atlx_adapter *adapter = netdev_priv(netdev);
+	unsigned long flags;
+	u32 ctrl;
+
+	spin_lock_irqsave(&adapter->lock, flags);
+	/* atlx_irq_disable(adapter); FIXME: confirm/remove */
+	adapter->vlgrp = grp;
+
+	if (grp) {
+		/* enable VLAN tag insert/strip */
+		ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
+		ctrl |= MAC_CTRL_RMV_VLAN;
+		iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
+	} else {
+		/* disable VLAN tag insert/strip */
+		ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
+		ctrl &= ~MAC_CTRL_RMV_VLAN;
+		iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
+	}
+
+	/* atlx_irq_enable(adapter); FIXME */
+	spin_unlock_irqrestore(&adapter->lock, flags);
+}
+
+static void atlx_restore_vlan(struct atlx_adapter *adapter)
+{
+	atlx_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+}
+
+/*
+ * This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+#define ATL1_MAX_NIC 4
+
+#define OPTION_UNSET    -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED  1
+
+#define ATL1_PARAM_INIT { [0 ... ATL1_MAX_NIC] = OPTION_UNSET }
+
+/*
+ * Interrupt Moderate Timer in units of 2 us
+ *
+ * Valid Range: 10-65535
+ *
+ * Default Value: 100 (200us)
+ */
+static int __devinitdata int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
+static int num_int_mod_timer;
+module_param_array_named(int_mod_timer, int_mod_timer, int,
+	&num_int_mod_timer, 0);
+MODULE_PARM_DESC(int_mod_timer, "Interrupt moderator timer");
+
+/*
+ * flash_vendor
+ *
+ * Valid Range: 0-2
+ *
+ * 0 - Atmel
+ * 1 - SST
+ * 2 - ST
+ *
+ * Default Value: 0
+ */
+static int __devinitdata flash_vendor[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
+static int num_flash_vendor;
+module_param_array_named(flash_vendor, flash_vendor, int, &num_flash_vendor, 0);
+MODULE_PARM_DESC(flash_vendor, "SPI flash vendor");
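Both parameters are per-board arrays, so, as a hypothetical invocation, loading the module with int_mod_timer=100,200 flash_vendor=0 would give the first NIC a 200 us moderation interval and the second a 400 us interval, and select the Atmel op-code set for the first NIC; boards without an explicit value fall back to the defaults below.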
+
+#define DEFAULT_INT_MOD_CNT	100	/* 200us */
+#define MAX_INT_MOD_CNT		65000
+#define MIN_INT_MOD_CNT		50
+
+#define FLASH_VENDOR_DEFAULT	0
+#define FLASH_VENDOR_MIN	0
+#define FLASH_VENDOR_MAX	2
+
+struct atl1_option {
+	enum { enable_option, range_option, list_option } type;
+	char *name;
+	char *err;
+	int def;
+	union {
+		struct {	/* range_option info */
+			int min;
+			int max;
+		} r;
+		struct {	/* list_option info */
+			int nr;
+			struct atl1_opt_list {
+				int i;
+				char *str;
+			} *p;
+		} l;
+	} arg;
+};
+
+static int __devinit atl1_validate_option(int *value, struct atl1_option *opt,
+	struct pci_dev *pdev)
+{
+	if (*value == OPTION_UNSET) {
+		*value = opt->def;
+		return 0;
+	}
+
+	switch (opt->type) {
+	case enable_option:
+		switch (*value) {
+		case OPTION_ENABLED:
+			dev_info(&pdev->dev, "%s enabled\n", opt->name);
+			return 0;
+		case OPTION_DISABLED:
+			dev_info(&pdev->dev, "%s disabled\n", opt->name);
+			return 0;
+		}
+		break;
+	case range_option:
+		if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+			dev_info(&pdev->dev, "%s set to %i\n", opt->name,
+				*value);
+			return 0;
+		}
+		break;
+	case list_option:{
+			int i;
+			struct atl1_opt_list *ent;
+
+			for (i = 0; i < opt->arg.l.nr; i++) {
+				ent = &opt->arg.l.p[i];
+				if (*value == ent->i) {
+					if (ent->str[0] != '\0')
+						dev_info(&pdev->dev, "%s\n",
+							ent->str);
+					return 0;
+				}
+			}
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	dev_info(&pdev->dev, "invalid %s specified (%i) %s\n",
+		opt->name, *value, opt->err);
+	*value = opt->def;
+	return -1;
+}
+
+/*
+ * atl1_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input.  If an invalid value is given, or if no user specified
+ * value exists, a default value is used.  The final value is stored
+ * in a variable in the adapter structure.
+ */
+void __devinit atl1_check_options(struct atl1_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int bd = adapter->bd_number;
+	if (bd >= ATL1_MAX_NIC) {
+		dev_notice(&pdev->dev, "no configuration for board#%i\n", bd);
+		dev_notice(&pdev->dev, "using defaults for all values\n");
+	}
+	{			/* Interrupt Moderate Timer */
+		struct atl1_option opt = {
+			.type = range_option,
+			.name = "Interrupt Moderator Timer",
+			.err = "using default of "
+				__MODULE_STRING(DEFAULT_INT_MOD_CNT),
+			.def = DEFAULT_INT_MOD_CNT,
+			.arg = {.r = {.min = MIN_INT_MOD_CNT,
+					.max = MAX_INT_MOD_CNT} }
+		};
+		int val;
+		if (num_int_mod_timer > bd) {
+			val = int_mod_timer[bd];
+			atl1_validate_option(&val, &opt, pdev);
+			adapter->imt = (u16) val;
+		} else
+			adapter->imt = (u16) (opt.def);
+	}
+
+	{			/* Flash Vendor */
+		struct atl1_option opt = {
+			.type = range_option,
+			.name = "SPI Flash Vendor",
+			.err = "using default of "
+				__MODULE_STRING(FLASH_VENDOR_DEFAULT),
+			.def = FLASH_VENDOR_DEFAULT,
+			.arg = {.r = {.min = FLASH_VENDOR_MIN,
+					.max = FLASH_VENDOR_MAX} }
+		};
+		int val;
+		if (num_flash_vendor > bd) {
+			val = flash_vendor[bd];
+			atl1_validate_option(&val, &opt, pdev);
+			adapter->hw.flash_vendor = (u8) val;
+		} else
+			adapter->hw.flash_vendor = (u8) (opt.def);
+	}
+}
+
+#endif /* ATLX_C */

+ 506 - 0
drivers/net/atlx/atlx.h

@@ -0,0 +1,506 @@
+/* atlx.h -- common hardware definitions for Attansic network drivers
+ *
+ * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
+ * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
+ * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
+ * Copyright(c) 2007 Atheros Corporation. All rights reserved.
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#ifndef ATLX_H
+#define ATLX_H
+
+#include <linux/module.h>
+#include <linux/types.h>
+
+#define ATLX_DRIVER_VERSION "2.1.1"
+MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \
+	Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ATLX_DRIVER_VERSION);
+
+#define ATLX_ERR_PHY			2
+#define ATLX_ERR_PHY_SPEED		7
+#define ATLX_ERR_PHY_RES		8
+
+#define SPEED_0				0xffff
+#define SPEED_10			10
+#define SPEED_100			100
+#define SPEED_1000			1000
+#define HALF_DUPLEX			1
+#define FULL_DUPLEX			2
+
+#define MEDIA_TYPE_AUTO_SENSOR		0
+
+/* register definitions */
+#define REG_PM_CTRLSTAT			0x44
+
+#define REG_PCIE_CAP_LIST		0x58
+
+#define REG_VPD_CAP			0x6C
+#define VPD_CAP_ID_MASK			0xFF
+#define VPD_CAP_ID_SHIFT		0
+#define VPD_CAP_NEXT_PTR_MASK		0xFF
+#define VPD_CAP_NEXT_PTR_SHIFT		8
+#define VPD_CAP_VPD_ADDR_MASK		0x7FFF
+#define VPD_CAP_VPD_ADDR_SHIFT		16
+#define VPD_CAP_VPD_FLAG		0x80000000
+
+#define REG_VPD_DATA			0x70
+
+#define REG_SPI_FLASH_CTRL		0x200
+#define SPI_FLASH_CTRL_STS_NON_RDY	0x1
+#define SPI_FLASH_CTRL_STS_WEN		0x2
+#define SPI_FLASH_CTRL_STS_WPEN		0x80
+#define SPI_FLASH_CTRL_DEV_STS_MASK	0xFF
+#define SPI_FLASH_CTRL_DEV_STS_SHIFT	0
+#define SPI_FLASH_CTRL_INS_MASK		0x7
+#define SPI_FLASH_CTRL_INS_SHIFT	8
+#define SPI_FLASH_CTRL_START		0x800
+#define SPI_FLASH_CTRL_EN_VPD		0x2000
+#define SPI_FLASH_CTRL_LDSTART		0x8000
+#define SPI_FLASH_CTRL_CS_HI_MASK	0x3
+#define SPI_FLASH_CTRL_CS_HI_SHIFT	16
+#define SPI_FLASH_CTRL_CS_HOLD_MASK	0x3
+#define SPI_FLASH_CTRL_CS_HOLD_SHIFT	18
+#define SPI_FLASH_CTRL_CLK_LO_MASK	0x3
+#define SPI_FLASH_CTRL_CLK_LO_SHIFT	20
+#define SPI_FLASH_CTRL_CLK_HI_MASK	0x3
+#define SPI_FLASH_CTRL_CLK_HI_SHIFT	22
+#define SPI_FLASH_CTRL_CS_SETUP_MASK	0x3
+#define SPI_FLASH_CTRL_CS_SETUP_SHIFT	24
+#define SPI_FLASH_CTRL_EROM_PGSZ_MASK	0x3
+#define SPI_FLASH_CTRL_EROM_PGSZ_SHIFT	26
+#define SPI_FLASH_CTRL_WAIT_READY	0x10000000
+
+#define REG_SPI_ADDR			0x204
+
+#define REG_SPI_DATA			0x208
+
+#define REG_SPI_FLASH_CONFIG		0x20C
+#define SPI_FLASH_CONFIG_LD_ADDR_MASK	0xFFFFFF
+#define SPI_FLASH_CONFIG_LD_ADDR_SHIFT	0
+#define SPI_FLASH_CONFIG_VPD_ADDR_MASK	0x3
+#define SPI_FLASH_CONFIG_VPD_ADDR_SHIFT	24
+#define SPI_FLASH_CONFIG_LD_EXIST	0x4000000
+
+#define REG_SPI_FLASH_OP_PROGRAM	0x210
+#define REG_SPI_FLASH_OP_SC_ERASE	0x211
+#define REG_SPI_FLASH_OP_CHIP_ERASE	0x212
+#define REG_SPI_FLASH_OP_RDID		0x213
+#define REG_SPI_FLASH_OP_WREN		0x214
+#define REG_SPI_FLASH_OP_RDSR		0x215
+#define REG_SPI_FLASH_OP_WRSR		0x216
+#define REG_SPI_FLASH_OP_READ		0x217
+
+#define REG_TWSI_CTRL			0x218
+#define TWSI_CTRL_LD_OFFSET_MASK	0xFF
+#define TWSI_CTRL_LD_OFFSET_SHIFT	0
+#define TWSI_CTRL_LD_SLV_ADDR_MASK	0x7
+#define TWSI_CTRL_LD_SLV_ADDR_SHIFT	8
+#define TWSI_CTRL_SW_LDSTART		0x800
+#define TWSI_CTRL_HW_LDSTART		0x1000
+#define TWSI_CTRL_SMB_SLV_ADDR_MASK	0x7F
+#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT	15
+#define TWSI_CTRL_LD_EXIST		0x400000
+#define TWSI_CTRL_READ_FREQ_SEL_MASK	0x3
+#define TWSI_CTRL_READ_FREQ_SEL_SHIFT	23
+#define TWSI_CTRL_FREQ_SEL_100K		0
+#define TWSI_CTRL_FREQ_SEL_200K		1
+#define TWSI_CTRL_FREQ_SEL_300K		2
+#define TWSI_CTRL_FREQ_SEL_400K		3
+#define TWSI_CTRL_SMB_SLV_ADDR		/* FIXME: define or remove */
+#define TWSI_CTRL_WRITE_FREQ_SEL_MASK	0x3
+#define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT	24
+
+#define REG_PCIE_DEV_MISC_CTRL			0x21C
+#define PCIE_DEV_MISC_CTRL_EXT_PIPE		0x2
+#define PCIE_DEV_MISC_CTRL_RETRY_BUFDIS		0x1
+#define PCIE_DEV_MISC_CTRL_SPIROM_EXIST		0x4
+#define PCIE_DEV_MISC_CTRL_SERDES_ENDIAN	0x8
+#define PCIE_DEV_MISC_CTRL_SERDES_SEL_DIN	0x10
+
+#define REG_PCIE_PHYMISC		0x1000
+#define PCIE_PHYMISC_FORCE_RCV_DET	0x4
+
+#define REG_PCIE_DLL_TX_CTRL1		0x1104
+#define PCIE_DLL_TX_CTRL1_SEL_NOR_CLK	0x400
+#define PCIE_DLL_TX_CTRL1_DEF		0x568
+
+#define REG_LTSSM_TEST_MODE		0x12FC
+#define LTSSM_TEST_MODE_DEF		0x6500
+
+/* Master Control Register */
+#define REG_MASTER_CTRL			0x1400
+#define MASTER_CTRL_SOFT_RST		0x1
+#define MASTER_CTRL_MTIMER_EN		0x2
+#define MASTER_CTRL_ITIMER_EN		0x4
+#define MASTER_CTRL_MANUAL_INT		0x8
+#define MASTER_CTRL_REV_NUM_SHIFT	16
+#define MASTER_CTRL_REV_NUM_MASK	0xFF
+#define MASTER_CTRL_DEV_ID_SHIFT	24
+#define MASTER_CTRL_DEV_ID_MASK		0xFF
+
+/* Timer Initial Value Register */
+#define REG_MANUAL_TIMER_INIT		0x1404
+
+/* IRQ Moderator Timer Initial Value Register */
+#define REG_IRQ_MODU_TIMER_INIT		0x1408
+
+#define REG_PHY_ENABLE			0x140C
+
+/* IRQ Anti-Lost Timer Initial Value Register */
+#define REG_CMBDISDMA_TIMER		0x140E
+
+/* Block IDLE Status Register */
+#define REG_IDLE_STATUS			0x1410
+
+/* MDIO Control Register */
+#define REG_MDIO_CTRL			0x1414
+#define MDIO_DATA_MASK			0xFFFF
+#define MDIO_DATA_SHIFT			0
+#define MDIO_REG_ADDR_MASK		0x1F
+#define MDIO_REG_ADDR_SHIFT		16
+#define MDIO_RW				0x200000
+#define MDIO_SUP_PREAMBLE		0x400000
+#define MDIO_START			0x800000
+#define MDIO_CLK_SEL_SHIFT		24
+#define MDIO_CLK_25_4			0
+#define MDIO_CLK_25_6			2
+#define MDIO_CLK_25_8			3
+#define MDIO_CLK_25_10			4
+#define MDIO_CLK_25_14			5
+#define MDIO_CLK_25_20			6
+#define MDIO_CLK_25_28			7
+#define MDIO_BUSY			0x8000000
+
+/* MII PHY Status Register */
+#define REG_PHY_STATUS			0x1418
+
+/* BIST Control and Status Register0 (for the Packet Memory) */
+#define REG_BIST0_CTRL			0x141C
+#define BIST0_NOW			0x1
+#define BIST0_SRAM_FAIL			0x2
+#define BIST0_FUSE_FLAG			0x4
+#define REG_BIST1_CTRL			0x1420
+#define BIST1_NOW			0x1
+#define BIST1_SRAM_FAIL			0x2
+#define BIST1_FUSE_FLAG			0x4
+
+/* SerDes Lock Detect Control and Status Register */
+#define REG_SERDES_LOCK			0x1424
+#define SERDES_LOCK_DETECT		1
+#define SERDES_LOCK_DETECT_EN		2
+
+/* MAC Control Register */
+#define REG_MAC_CTRL			0x1480
+#define MAC_CTRL_TX_EN			1
+#define MAC_CTRL_RX_EN			2
+#define MAC_CTRL_TX_FLOW		4
+#define MAC_CTRL_RX_FLOW		8
+#define MAC_CTRL_LOOPBACK		0x10
+#define MAC_CTRL_DUPLX			0x20
+#define MAC_CTRL_ADD_CRC		0x40
+#define MAC_CTRL_PAD			0x80
+#define MAC_CTRL_LENCHK			0x100
+#define MAC_CTRL_HUGE_EN		0x200
+#define MAC_CTRL_PRMLEN_SHIFT		10
+#define MAC_CTRL_PRMLEN_MASK		0xF
+#define MAC_CTRL_RMV_VLAN		0x4000
+#define MAC_CTRL_PROMIS_EN		0x8000
+#define MAC_CTRL_MC_ALL_EN		0x2000000
+#define MAC_CTRL_BC_EN			0x4000000
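As a hedged sketch of how these MAC control bits combine (example values, not taken from the driver), enabling the transmitter and receiver with VLAN tag stripping and broadcast reception could look like:

static inline u32 example_mac_ctrl_value(void)
{
	return MAC_CTRL_TX_EN | MAC_CTRL_RX_EN |
	       MAC_CTRL_RMV_VLAN | MAC_CTRL_BC_EN;
}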
+
+/* MAC IPG/IFG Control Register */
+#define REG_MAC_IPG_IFG			0x1484
+#define MAC_IPG_IFG_IPGT_SHIFT		0
+#define MAC_IPG_IFG_IPGT_MASK		0x7F
+#define MAC_IPG_IFG_MIFG_SHIFT		8
+#define MAC_IPG_IFG_MIFG_MASK		0xFF
+#define MAC_IPG_IFG_IPGR1_SHIFT		16
+#define MAC_IPG_IFG_IPGR1_MASK		0x7F
+#define MAC_IPG_IFG_IPGR2_SHIFT		24
+#define MAC_IPG_IFG_IPGR2_MASK		0x7F
+
+/* MAC STATION ADDRESS */
+#define REG_MAC_STA_ADDR		0x1488
+
+/* Hash table for multicast address */
+#define REG_RX_HASH_TABLE		0x1490
+
+/* MAC Half-Duplex Control Register */
+#define REG_MAC_HALF_DUPLX_CTRL			0x1498
+#define MAC_HALF_DUPLX_CTRL_LCOL_SHIFT		0
+#define MAC_HALF_DUPLX_CTRL_LCOL_MASK		0x3FF
+#define MAC_HALF_DUPLX_CTRL_RETRY_SHIFT		12
+#define MAC_HALF_DUPLX_CTRL_RETRY_MASK		0xF
+#define MAC_HALF_DUPLX_CTRL_EXC_DEF_EN		0x10000
+#define MAC_HALF_DUPLX_CTRL_NO_BACK_C		0x20000
+#define MAC_HALF_DUPLX_CTRL_NO_BACK_P		0x40000
+#define MAC_HALF_DUPLX_CTRL_ABEBE		0x80000
+#define MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT		20
+#define MAC_HALF_DUPLX_CTRL_ABEBT_MASK		0xF
+#define MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT	24
+#define MAC_HALF_DUPLX_CTRL_JAMIPG_MASK		0xF
+
+/* Maximum Frame Length Control Register */
+#define REG_MTU				0x149C
+
+/* Wake-On-Lan control register */
+#define REG_WOL_CTRL			0x14A0
+#define WOL_PATTERN_EN			0x1
+#define WOL_PATTERN_PME_EN		0x2
+#define WOL_MAGIC_EN			0x4
+#define WOL_MAGIC_PME_EN		0x8
+#define WOL_LINK_CHG_EN			0x10
+#define WOL_LINK_CHG_PME_EN		0x20
+#define WOL_PATTERN_ST			0x100
+#define WOL_MAGIC_ST			0x200
+#define WOL_LINKCHG_ST			0x400
+#define WOL_PT0_EN			0x10000
+#define WOL_PT1_EN			0x20000
+#define WOL_PT2_EN			0x40000
+#define WOL_PT3_EN			0x80000
+#define WOL_PT4_EN			0x100000
+#define WOL_PT0_MATCH			0x1000000
+#define WOL_PT1_MATCH			0x2000000
+#define WOL_PT2_MATCH			0x4000000
+#define WOL_PT3_MATCH			0x8000000
+#define WOL_PT4_MATCH			0x10000000
+
+/* Internal SRAM Partition Register, high 32 bits */
+#define REG_SRAM_RFD_ADDR		0x1500
+
+/* Descriptor Control register, high 32 bits */
+#define REG_DESC_BASE_ADDR_HI		0x1540
+
+/* Interrupt Status Register */
+#define REG_ISR				0x1600
+#define ISR_UR_DETECTED			0x1000000
+#define ISR_FERR_DETECTED		0x2000000
+#define ISR_NFERR_DETECTED		0x4000000
+#define ISR_CERR_DETECTED		0x8000000
+#define ISR_PHY_LINKDOWN		0x10000000
+#define ISR_DIS_INT			0x80000000
+
+/* Interrupt Mask Register */
+#define REG_IMR				0x1604
+
+#define REG_RFD_RRD_IDX			0x1800
+#define REG_TPD_IDX			0x1804
+
+/* MII definitions */
+
+/* PHY Common Register */
+#define MII_ATLX_CR			0x09
+#define MII_ATLX_SR			0x0A
+#define MII_ATLX_ESR			0x0F
+#define MII_ATLX_PSCR			0x10
+#define MII_ATLX_PSSR			0x11
+
+/* PHY Control Register */
+#define MII_CR_SPEED_SELECT_MSB		0x0040	/* bits 6,13: 10=1000, 01=100,
+						 * 00=10
+						 */
+#define MII_CR_COLL_TEST_ENABLE		0x0080	/* Collision test enable */
+#define MII_CR_FULL_DUPLEX		0x0100	/* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG		0x0200	/* Restart auto negotiation */
+#define MII_CR_ISOLATE			0x0400	/* Isolate PHY from MII */
+#define MII_CR_POWER_DOWN		0x0800	/* Power down */
+#define MII_CR_AUTO_NEG_EN		0x1000	/* Auto Neg Enable */
+#define MII_CR_SPEED_SELECT_LSB		0x2000	/* bits 6,13: 10=1000, 01=100,
+						 * 00=10
+						 */
+#define MII_CR_LOOPBACK			0x4000	/* 0 = normal, 1 = loopback */
+#define MII_CR_RESET			0x8000	/* 0 = normal, 1 = PHY reset */
+#define MII_CR_SPEED_MASK		0x2040
+#define MII_CR_SPEED_1000		0x0040
+#define MII_CR_SPEED_100		0x2000
+#define MII_CR_SPEED_10			0x0000
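As a hypothetical example of the PHY control bits above (not driver code), forcing 100 Mb/s full duplex combines the speed-select LSB with the duplex bit while leaving auto-negotiation disabled:

static inline u16 example_bmcr_100_full(void)
{
	return MII_CR_SPEED_100 | MII_CR_FULL_DUPLEX;
}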
+
+/* PHY Status Register */
+#define MII_SR_EXTENDED_CAPS		0x0001	/* Ext register capabilities */
+#define MII_SR_JABBER_DETECT		0x0002	/* Jabber Detected */
+#define MII_SR_LINK_STATUS		0x0004	/* Link Status 1 = link */
+#define MII_SR_AUTONEG_CAPS		0x0008	/* Auto Neg Capable */
+#define MII_SR_REMOTE_FAULT		0x0010	/* Remote Fault Detect */
+#define MII_SR_AUTONEG_COMPLETE		0x0020	/* Auto Neg Complete */
+#define MII_SR_PREAMBLE_SUPPRESS	0x0040	/* Preamble may be suppressed */
+#define MII_SR_EXTENDED_STATUS		0x0100	/* Ext stat info in Reg 0x0F */
+#define MII_SR_100T2_HD_CAPS		0x0200	/* 100T2 Half Duplex Capable */
+#define MII_SR_100T2_FD_CAPS		0x0400	/* 100T2 Full Duplex Capable */
+#define MII_SR_10T_HD_CAPS		0x0800	/* 10T   Half Duplex Capable */
+#define MII_SR_10T_FD_CAPS		0x1000	/* 10T   Full Duplex Capable */
+#define MII_SR_100X_HD_CAPS		0x2000	/* 100X  Half Duplex Capable */
+#define MII_SR_100X_FD_CAPS		0x4000	/* 100X  Full Duplex Capable */
+#define MII_SR_100T4_CAPS		0x8000	/* 100T4 Capable */
+
+/* Link partner ability register */
+#define MII_LPA_SLCT			0x001f	/* Same as advertise selector */
+#define MII_LPA_10HALF			0x0020	/* Can do 10mbps half-duplex */
+#define MII_LPA_10FULL			0x0040	/* Can do 10mbps full-duplex */
+#define MII_LPA_100HALF			0x0080	/* Can do 100mbps half-duplex */
+#define MII_LPA_100FULL			0x0100	/* Can do 100mbps full-duplex */
+#define MII_LPA_100BASE4		0x0200	/* 100BASE-T4 */
+#define MII_LPA_PAUSE			0x0400	/* PAUSE */
+#define MII_LPA_ASYPAUSE		0x0800	/* Asymmetrical PAUSE */
+#define MII_LPA_RFAULT			0x2000	/* Link partner faulted */
+#define MII_LPA_LPACK			0x4000	/* Link partner acked us */
+#define MII_LPA_NPAGE			0x8000	/* Next page bit */
+
+/* Autoneg Advertisement Register */
+#define MII_AR_SELECTOR_FIELD		0x0001	/* IEEE 802.3 CSMA/CD */
+#define MII_AR_10T_HD_CAPS		0x0020	/* 10T   Half Duplex Capable */
+#define MII_AR_10T_FD_CAPS		0x0040	/* 10T   Full Duplex Capable */
+#define MII_AR_100TX_HD_CAPS		0x0080	/* 100TX Half Duplex Capable */
+#define MII_AR_100TX_FD_CAPS		0x0100	/* 100TX Full Duplex Capable */
+#define MII_AR_100T4_CAPS		0x0200	/* 100T4 Capable */
+#define MII_AR_PAUSE			0x0400	/* Pause operation desired */
+#define MII_AR_ASM_DIR			0x0800	/* Asymmetric Pause Dir bit */
+#define MII_AR_REMOTE_FAULT		0x2000	/* Remote Fault detected */
+#define MII_AR_NEXT_PAGE		0x8000	/* Next Page ability support */
+#define MII_AR_SPEED_MASK		0x01E0
+#define MII_AR_DEFAULT_CAP_MASK		0x0DE0
+
+/* 1000BASE-T Control Register */
+#define MII_ATLX_CR_1000T_HD_CAPS	0x0100	/* Adv 1000T HD cap */
+#define MII_ATLX_CR_1000T_FD_CAPS	0x0200	/* Adv 1000T FD cap */
+#define MII_ATLX_CR_1000T_REPEATER_DTE	0x0400	/* 1=Repeater/switch device,
+						 * 0=DTE device */
+#define MII_ATLX_CR_1000T_MS_VALUE	0x0800	/* 1=Config PHY as Master,
+						 * 0=Configure PHY as Slave */
+#define MII_ATLX_CR_1000T_MS_ENABLE	0x1000	/* 1=Man Master/Slave config,
+						 * 0=Auto Master/Slave config
+						 */
+#define MII_ATLX_CR_1000T_TEST_MODE_NORMAL	0x0000	/* Normal Operation */
+#define MII_ATLX_CR_1000T_TEST_MODE_1	0x2000	/* Transmit Waveform test */
+#define MII_ATLX_CR_1000T_TEST_MODE_2	0x4000	/* Master Xmit Jitter test */
+#define MII_ATLX_CR_1000T_TEST_MODE_3	0x6000	/* Slave Xmit Jitter test */
+#define MII_ATLX_CR_1000T_TEST_MODE_4	0x8000	/* Xmitter Distortion test */
+#define MII_ATLX_CR_1000T_SPEED_MASK	0x0300
+#define MII_ATLX_CR_1000T_DEFAULT_CAP_MASK	0x0300
+
+/* 1000BASE-T Status Register */
+#define MII_ATLX_SR_1000T_LP_HD_CAPS	0x0400	/* LP is 1000T HD capable */
+#define MII_ATLX_SR_1000T_LP_FD_CAPS	0x0800	/* LP is 1000T FD capable */
+#define MII_ATLX_SR_1000T_REMOTE_RX_STATUS	0x1000	/* Remote receiver OK */
+#define MII_ATLX_SR_1000T_LOCAL_RX_STATUS	0x2000	/* Local receiver OK */
+#define MII_ATLX_SR_1000T_MS_CONFIG_RES		0x4000	/* 1=Local TX is Master
+							 * 0=Slave
+							 */
+#define MII_ATLX_SR_1000T_MS_CONFIG_FAULT	0x8000	/* Master/Slave config
+							 * fault */
+#define MII_ATLX_SR_1000T_REMOTE_RX_STATUS_SHIFT	12
+#define MII_ATLX_SR_1000T_LOCAL_RX_STATUS_SHIFT		13
+
+/* Extended Status Register */
+#define MII_ATLX_ESR_1000T_HD_CAPS	0x1000	/* 1000T HD capable */
+#define MII_ATLX_ESR_1000T_FD_CAPS	0x2000	/* 1000T FD capable */
+#define MII_ATLX_ESR_1000X_HD_CAPS	0x4000	/* 1000X HD capable */
+#define MII_ATLX_ESR_1000X_FD_CAPS	0x8000	/* 1000X FD capable */
+
+/* ATLX PHY Specific Control Register */
+#define MII_ATLX_PSCR_JABBER_DISABLE	0x0001	/* 1=Jabber Func disabled */
+#define MII_ATLX_PSCR_POLARITY_REVERSAL	0x0002	/* 1=Polarity Reversal enbld */
+#define MII_ATLX_PSCR_SQE_TEST		0x0004	/* 1=SQE Test enabled */
+#define MII_ATLX_PSCR_MAC_POWERDOWN	0x0008
+#define MII_ATLX_PSCR_CLK125_DISABLE	0x0010	/* 1=CLK125 low
+						 * 0=CLK125 toggling
+						 */
+#define MII_ATLX_PSCR_MDI_MANUAL_MODE	0x0000	/* MDI Crossover Mode bits 6:5,
+						 * Manual MDI configuration
+						 */
+#define MII_ATLX_PSCR_MDIX_MANUAL_MODE	0x0020	/* Manual MDIX configuration */
+#define MII_ATLX_PSCR_AUTO_X_1000T	0x0040	/* 1000BASE-T: Auto crossover
+						 * 100BASE-TX/10BASE-T: MDI
+						 * Mode */
+#define MII_ATLX_PSCR_AUTO_X_MODE	0x0060	/* Auto crossover enabled
+						 * all speeds.
+						 */
+#define MII_ATLX_PSCR_10BT_EXT_DIST_ENABLE	0x0080	/* 1=Enable Extended
+							 * 10BASE-T distance
+							 * (Lower 10BASE-T RX
+							 * Threshold)
+							 * 0=Normal 10BASE-T RX
+							 * Threshold
+							 */
+#define MII_ATLX_PSCR_MII_5BIT_ENABLE	0x0100	/* 1=5-Bit interface in
+						 * 100BASE-TX
+						 * 0=MII interface in
+						 * 100BASE-TX
+						 */
+#define MII_ATLX_PSCR_SCRAMBLER_DISABLE	0x0200	/* 1=Scrambler dsbl */
+#define MII_ATLX_PSCR_FORCE_LINK_GOOD	0x0400	/* 1=Force link good */
+#define MII_ATLX_PSCR_ASSERT_CRS_ON_TX	0x0800	/* 1=Assert CRS on Transmit */
+#define MII_ATLX_PSCR_POLARITY_REVERSAL_SHIFT		1
+#define MII_ATLX_PSCR_AUTO_X_MODE_SHIFT			5
+#define MII_ATLX_PSCR_10BT_EXT_DIST_ENABLE_SHIFT	7
+
+/* ATLX PHY Specific Status Register */
+#define MII_ATLX_PSSR_SPD_DPLX_RESOLVED	0x0800	/* 1=Speed & Duplex resolved */
+#define MII_ATLX_PSSR_DPLX		0x2000	/* 1=Duplex 0=Half Duplex */
+#define MII_ATLX_PSSR_SPEED		0xC000	/* Speed, bits 14:15 */
+#define MII_ATLX_PSSR_10MBS		0x0000	/* 00=10Mbs */
+#define MII_ATLX_PSSR_100MBS		0x4000	/* 01=100Mbs */
+#define MII_ATLX_PSSR_1000MBS		0x8000	/* 10=1000Mbs */
+
+/* PCI Command Register Bit Definitions */
+#define PCI_REG_COMMAND			0x04	/* PCI Command Register */
+#define CMD_IO_SPACE			0x0001
+#define CMD_MEMORY_SPACE		0x0002
+#define CMD_BUS_MASTER			0x0004
+
+/* Wake Up Filter Control */
+#define ATLX_WUFC_LNKC	0x00000001	/* Link Status Change Wakeup Enable */
+#define ATLX_WUFC_MAG	0x00000002	/* Magic Packet Wakeup Enable */
+#define ATLX_WUFC_EX	0x00000004	/* Directed Exact Wakeup Enable */
+#define ATLX_WUFC_MC	0x00000008	/* Multicast Wakeup Enable */
+#define ATLX_WUFC_BC	0x00000010	/* Broadcast Wakeup Enable */
+
+#define ADVERTISE_10_HALF		0x0001
+#define ADVERTISE_10_FULL		0x0002
+#define ADVERTISE_100_HALF		0x0004
+#define ADVERTISE_100_FULL		0x0008
+#define ADVERTISE_1000_HALF		0x0010
+#define ADVERTISE_1000_FULL		0x0020
+#define AUTONEG_ADVERTISE_10_100_ALL	0x000F	/* All 10/100 speeds */
+#define AUTONEG_ADVERTISE_10_ALL	0x0003	/* 10Mbps Full & Half speeds */
+
+#define PHY_AUTO_NEG_TIME		45	/* 4.5 Seconds */
+#define PHY_FORCE_TIME			20	/* 2.0 Seconds */
+
+/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA */
+#define EEPROM_SUM			0xBABA
+#define NODE_ADDRESS_SIZE		6
+
+struct atlx_spi_flash_dev {
+	const char *manu_name;	/* manufacturer id */
+	/* op-code */
+	u8 cmd_wrsr;
+	u8 cmd_read;
+	u8 cmd_program;
+	u8 cmd_wren;
+	u8 cmd_wrdi;
+	u8 cmd_rdsr;
+	u8 cmd_rdid;
+	u8 cmd_sector_erase;
+	u8 cmd_chip_erase;
+};
+
+#endif /* ATLX_H */

+ 2 - 2
drivers/net/atp.c

@@ -378,8 +378,8 @@ static void __init get_node_ID(struct net_device *dev)
 		sa_offset = 15;
 
 	for (i = 0; i < 3; i++)
-		((u16 *)dev->dev_addr)[i] =
-			be16_to_cpu(eeprom_op(ioaddr, EE_READ(sa_offset + i)));
+		((__be16 *)dev->dev_addr)[i] =
+			cpu_to_be16(eeprom_op(ioaddr, EE_READ(sa_offset + i)));
 
 	write_reg(ioaddr, CMR2, CMR2_NULL);
 }

+ 3 - 3
drivers/net/au1000_eth.c

@@ -701,7 +701,7 @@ static struct net_device * au1000_probe(int port_num)
 	aup->mii_bus.write = mdiobus_write;
 	aup->mii_bus.reset = mdiobus_reset;
 	aup->mii_bus.name = "au1000_eth_mii";
-	aup->mii_bus.id = aup->mac_id;
+	snprintf(aup->mii_bus.id, MII_BUS_ID_SIZE, "%x", aup->mac_id);
 	aup->mii_bus.irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
 	for(i = 0; i < PHY_MAX_ADDR; ++i)
 		aup->mii_bus.irq[i] = PHY_POLL;
@@ -709,11 +709,11 @@ static struct net_device * au1000_probe(int port_num)
 	/* if known, set corresponding PHY IRQs */
 #if defined(AU1XXX_PHY_STATIC_CONFIG)
 # if defined(AU1XXX_PHY0_IRQ)
-	if (AU1XXX_PHY0_BUSID == aup->mii_bus.id)
+	if (AU1XXX_PHY0_BUSID == aup->mac_id)
 		aup->mii_bus.irq[AU1XXX_PHY0_ADDR] = AU1XXX_PHY0_IRQ;
 # endif
 # if defined(AU1XXX_PHY1_IRQ)
-	if (AU1XXX_PHY1_BUSID == aup->mii_bus.id)
+	if (AU1XXX_PHY1_BUSID == aup->mac_id)
 		aup->mii_bus.irq[AU1XXX_PHY1_ADDR] = AU1XXX_PHY1_IRQ;
 # endif
 #endif

+ 1 - 1
drivers/net/bfin_mac.c

@@ -969,7 +969,7 @@ static int __init bf537mac_probe(struct net_device *dev)
 	lp->mii_bus.write = mdiobus_write;
 	lp->mii_bus.reset = mdiobus_reset;
 	lp->mii_bus.name = "bfin_mac_mdio";
-	lp->mii_bus.id = 0;
+	snprintf(lp->mii_bus.id, MII_BUS_ID_SIZE, "0");
 	lp->mii_bus.irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
 	for (i = 0; i < PHY_MAX_ADDR; ++i)
 		lp->mii_bus.irq[i] = PHY_POLL;

+ 1 - 1
drivers/net/bonding/bond_3ad.c

@@ -2429,7 +2429,7 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
 	struct slave *slave = NULL;
 	int ret = NET_RX_DROP;
 
-	if (dev->nd_net != &init_net)
+	if (dev_net(dev) != &init_net)
 		goto out;
 
 	if (!(dev->flags & IFF_MASTER))

+ 1 - 1
drivers/net/bonding/bond_alb.c

@@ -345,7 +345,7 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct
 	struct arp_pkt *arp = (struct arp_pkt *)skb->data;
 	int res = NET_RX_DROP;
 
-	if (bond_dev->nd_net != &init_net)
+	if (dev_net(bond_dev) != &init_net)
 		goto out;
 
 	if (!(bond_dev->flags & IFF_MASTER))

+ 6 - 8
drivers/net/bonding/bond_main.c

@@ -2629,7 +2629,7 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
 	unsigned char *arp_ptr;
 	__be32 sip, tip;
 
-	if (dev->nd_net != &init_net)
+	if (dev_net(dev) != &init_net)
 		goto out;
 
 	if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER))
@@ -2646,10 +2646,7 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
 	if (!slave || !slave_do_arp_validate(bond, slave))
 		goto out_unlock;
 
-	/* ARP header, plus 2 device addresses, plus 2 IP addresses.  */
-	if (!pskb_may_pull(skb, (sizeof(struct arphdr) +
-				 (2 * dev->addr_len) +
-				 (2 * sizeof(u32)))))
+	if (!pskb_may_pull(skb, arp_hdr_len(dev)))
 		goto out_unlock;
 
 	arp = arp_hdr(skb);
@@ -3068,8 +3065,6 @@ out:
 
 #ifdef CONFIG_PROC_FS
 
-#define SEQ_START_TOKEN ((void *)1)
-
 static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
 {
 	struct bonding *bond = seq->private;
@@ -3473,7 +3468,7 @@ static int bond_netdev_event(struct notifier_block *this, unsigned long event, v
 {
 	struct net_device *event_dev = (struct net_device *)ptr;
 
-	if (event_dev->nd_net != &init_net)
+	if (dev_net(event_dev) != &init_net)
 		return NOTIFY_DONE;
 
 	dprintk("event_dev: %s, event: %lx\n",
@@ -3511,6 +3506,9 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
 	struct bonding *bond, *bond_next;
 	struct vlan_entry *vlan, *vlan_next;
 
+	if (dev_net(ifa->ifa_dev->dev) != &init_net)
+		return NOTIFY_DONE;
+
 	list_for_each_entry_safe(bond, bond_next, &bond_dev_list, bond_list) {
 		if (bond->dev == event_dev) {
 			switch (event) {

+ 4 - 8
drivers/net/cassini.c

@@ -532,8 +532,7 @@ static void cas_spare_free(struct cas *cp)
 	/* free spare buffers */
 	INIT_LIST_HEAD(&list);
 	spin_lock(&cp->rx_spare_lock);
-	list_splice(&cp->rx_spare_list, &list);
-	INIT_LIST_HEAD(&cp->rx_spare_list);
+	list_splice_init(&cp->rx_spare_list, &list);
 	spin_unlock(&cp->rx_spare_lock);
 	list_for_each_safe(elem, tmp, &list) {
 		cas_page_free(cp, list_entry(elem, cas_page_t, list));
@@ -546,13 +545,11 @@ static void cas_spare_free(struct cas *cp)
 	 * lock than used everywhere else to manipulate this list.
 	 */
 	spin_lock(&cp->rx_inuse_lock);
-	list_splice(&cp->rx_inuse_list, &list);
-	INIT_LIST_HEAD(&cp->rx_inuse_list);
+	list_splice_init(&cp->rx_inuse_list, &list);
 	spin_unlock(&cp->rx_inuse_lock);
 #else
 	spin_lock(&cp->rx_spare_lock);
-	list_splice(&cp->rx_inuse_list, &list);
-	INIT_LIST_HEAD(&cp->rx_inuse_list);
+	list_splice_init(&cp->rx_inuse_list, &list);
 	spin_unlock(&cp->rx_spare_lock);
 #endif
 	list_for_each_safe(elem, tmp, &list) {
@@ -573,8 +570,7 @@ static void cas_spare_recover(struct cas *cp, const gfp_t flags)
 	/* make a local copy of the list */
 	INIT_LIST_HEAD(&list);
 	spin_lock(&cp->rx_inuse_lock);
-	list_splice(&cp->rx_inuse_list, &list);
-	INIT_LIST_HEAD(&cp->rx_inuse_list);
+	list_splice_init(&cp->rx_inuse_list, &list);
 	spin_unlock(&cp->rx_inuse_lock);
 
 	list_for_each_safe(elem, tmp, &list) {

+ 2 - 3
drivers/net/cpmac.c

@@ -987,7 +987,7 @@ static int external_switch;
 static int __devinit cpmac_probe(struct platform_device *pdev)
 {
 	int rc, phy_id, i;
-	int mdio_bus_id = cpmac_mii.id;
+	char *mdio_bus_id = "0";
 	struct resource *mem;
 	struct cpmac_priv *priv;
 	struct net_device *dev;
@@ -1008,8 +1008,6 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
 		if (external_switch || dumb_switch) {
 			struct fixed_phy_status status = {};
 
-			mdio_bus_id = 0;
-
 			/*
 			 * FIXME: this should be in the platform code!
 			 * Since there is not platform code at all (that is,
@@ -1143,6 +1141,7 @@ int __devinit cpmac_init(void)
 	}
 
 	cpmac_mii.phy_mask = ~(mask | 0x80000000);
+	snprintf(cpmac_mii.id, MII_BUS_ID_SIZE, "0");
 
 	res = mdiobus_register(&cpmac_mii);
 	if (res)

+ 2 - 2
drivers/net/cxgb3/cxgb3_main.c

@@ -1014,8 +1014,8 @@ static int offload_open(struct net_device *dev)
 		     adapter->port[0]->mtu : 0xffff);
 	init_smt(adapter);
 
-	/* Never mind if the next step fails */
-	sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);
+	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
+		dev_dbg(&dev->dev, "cannot create sysfs group\n");
 
 	/* Call back all registered clients */
 	cxgb3_add_clients(tdev);

+ 19 - 3
drivers/net/cxgb3/cxgb3_offload.c

@@ -833,10 +833,26 @@ static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
 	return 0;
 }
 
+/*
+ * That skb would better have come from process_responses() where we abuse
+ * ->priority and ->csum to carry our data.  NB: if we get to per-arch
+ * ->csum, the things might get really interesting here.
+ */
+
+static inline u32 get_hwtid(struct sk_buff *skb)
+{
+	return ntohl((__force __be32)skb->priority) >> 8 & 0xfffff;
+}
+
+static inline u32 get_opcode(struct sk_buff *skb)
+{
+	return G_OPCODE(ntohl((__force __be32)skb->csum));
+}
+
 static int do_term(struct t3cdev *dev, struct sk_buff *skb)
 {
-	unsigned int hwtid = ntohl(skb->priority) >> 8 & 0xfffff;
-	unsigned int opcode = G_OPCODE(ntohl(skb->csum));
+	unsigned int hwtid = get_hwtid(skb);
+	unsigned int opcode = get_opcode(skb);
 	struct t3c_tid_entry *t3c_tid;
 
 	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
@@ -914,7 +930,7 @@ int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
 {
 	while (n--) {
 		struct sk_buff *skb = *skbs++;
-		unsigned int opcode = G_OPCODE(ntohl(skb->csum));
+		unsigned int opcode = get_opcode(skb);
 		int ret = cpl_handlers[opcode] (dev, skb);
 
 #if VALIDATE_TID

+ 1 - 1
drivers/net/cxgb3/l2t.c

@@ -407,7 +407,7 @@ found:
 			} else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE))
 				setup_l2e_send_pending(dev, NULL, e);
 		} else {
-			e->state = neigh_is_connected(neigh) ?
+			e->state = neigh->nud_state & NUD_CONNECTED ?
 			    L2T_STATE_VALID : L2T_STATE_STALE;
 			if (memcmp(e->dmac, neigh->ha, 6))
 				setup_l2e_send_pending(dev, NULL, e);

+ 2 - 1
drivers/net/defxx.c

@@ -971,7 +971,8 @@ static int __devinit dfx_driver_init(struct net_device *dev,
 	int alloc_size;			/* total buffer size needed */
 	char *top_v, *curr_v;		/* virtual addrs into memory block */
 	dma_addr_t top_p, curr_p;	/* physical addrs into memory block */
-	u32 data, le32;			/* host data register value */
+	u32 data;			/* host data register value */
+	__le32 le32;
 	char *board_name = NULL;
 
 	DBG_printk("In dfx_driver_init...\n");

+ 56 - 57
drivers/net/e1000/e1000.h

@@ -161,13 +161,13 @@ struct e1000_buffer {
 	struct sk_buff *skb;
 	dma_addr_t dma;
 	unsigned long time_stamp;
-	uint16_t length;
-	uint16_t next_to_watch;
+	u16 length;
+	u16 next_to_watch;
 };
 
 
 struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; };
-struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; };
+struct e1000_ps_page_dma { u64 ps_page_dma[PS_PAGE_BUFFERS]; };
 
 struct e1000_tx_ring {
 	/* pointer to the descriptor ring memory */
@@ -186,9 +186,9 @@ struct e1000_tx_ring {
 	struct e1000_buffer *buffer_info;
 
 	spinlock_t tx_lock;
-	uint16_t tdh;
-	uint16_t tdt;
-	boolean_t last_tx_tso;
+	u16 tdh;
+	u16 tdt;
+	bool last_tx_tso;
 };
 
 struct e1000_rx_ring {
@@ -213,8 +213,8 @@ struct e1000_rx_ring {
 	/* cpu for rx queue */
 	int cpu;
 
-	uint16_t rdh;
-	uint16_t rdt;
+	u16 rdh;
+	u16 rdt;
 };
 
 #define E1000_DESC_UNUSED(R) \
@@ -237,31 +237,30 @@ struct e1000_adapter {
 	struct timer_list watchdog_timer;
 	struct timer_list phy_info_timer;
 	struct vlan_group *vlgrp;
-	uint16_t mng_vlan_id;
-	uint32_t bd_number;
-	uint32_t rx_buffer_len;
-	uint32_t wol;
-	uint32_t smartspeed;
-	uint32_t en_mng_pt;
-	uint16_t link_speed;
-	uint16_t link_duplex;
+	u16 mng_vlan_id;
+	u32 bd_number;
+	u32 rx_buffer_len;
+	u32 wol;
+	u32 smartspeed;
+	u32 en_mng_pt;
+	u16 link_speed;
+	u16 link_duplex;
 	spinlock_t stats_lock;
 #ifdef CONFIG_E1000_NAPI
 	spinlock_t tx_queue_lock;
 #endif
-	atomic_t irq_sem;
 	unsigned int total_tx_bytes;
 	unsigned int total_tx_packets;
 	unsigned int total_rx_bytes;
 	unsigned int total_rx_packets;
 	/* Interrupt Throttle Rate */
-	uint32_t itr;
-	uint32_t itr_setting;
-	uint16_t tx_itr;
-	uint16_t rx_itr;
+	u32 itr;
+	u32 itr_setting;
+	u16 tx_itr;
+	u16 rx_itr;
 
 	struct work_struct reset_task;
-	uint8_t fc_autoneg;
+	u8 fc_autoneg;
 
 	struct timer_list blink_timer;
 	unsigned long led_status;
@@ -270,30 +269,30 @@ struct e1000_adapter {
 	struct e1000_tx_ring *tx_ring;      /* One per active queue */
 	unsigned int restart_queue;
 	unsigned long tx_queue_len;
-	uint32_t txd_cmd;
-	uint32_t tx_int_delay;
-	uint32_t tx_abs_int_delay;
-	uint32_t gotcl;
-	uint64_t gotcl_old;
-	uint64_t tpt_old;
-	uint64_t colc_old;
-	uint32_t tx_timeout_count;
-	uint32_t tx_fifo_head;
-	uint32_t tx_head_addr;
-	uint32_t tx_fifo_size;
-	uint8_t  tx_timeout_factor;
+	u32 txd_cmd;
+	u32 tx_int_delay;
+	u32 tx_abs_int_delay;
+	u32 gotcl;
+	u64 gotcl_old;
+	u64 tpt_old;
+	u64 colc_old;
+	u32 tx_timeout_count;
+	u32 tx_fifo_head;
+	u32 tx_head_addr;
+	u32 tx_fifo_size;
+	u8  tx_timeout_factor;
 	atomic_t tx_fifo_stall;
-	boolean_t pcix_82544;
-	boolean_t detect_tx_hung;
+	bool pcix_82544;
+	bool detect_tx_hung;
 
 	/* RX */
 #ifdef CONFIG_E1000_NAPI
-	boolean_t (*clean_rx) (struct e1000_adapter *adapter,
-			       struct e1000_rx_ring *rx_ring,
-			       int *work_done, int work_to_do);
+	bool (*clean_rx) (struct e1000_adapter *adapter,
+			  struct e1000_rx_ring *rx_ring,
+			  int *work_done, int work_to_do);
 #else
-	boolean_t (*clean_rx) (struct e1000_adapter *adapter,
-			       struct e1000_rx_ring *rx_ring);
+	bool (*clean_rx) (struct e1000_adapter *adapter,
+			  struct e1000_rx_ring *rx_ring);
 #endif
 	void (*alloc_rx_buf) (struct e1000_adapter *adapter,
 			      struct e1000_rx_ring *rx_ring,
@@ -306,17 +305,17 @@ struct e1000_adapter {
 	int num_tx_queues;
 	int num_rx_queues;
 
-	uint64_t hw_csum_err;
-	uint64_t hw_csum_good;
-	uint64_t rx_hdr_split;
-	uint32_t alloc_rx_buff_failed;
-	uint32_t rx_int_delay;
-	uint32_t rx_abs_int_delay;
-	boolean_t rx_csum;
+	u64 hw_csum_err;
+	u64 hw_csum_good;
+	u64 rx_hdr_split;
+	u32 alloc_rx_buff_failed;
+	u32 rx_int_delay;
+	u32 rx_abs_int_delay;
+	bool rx_csum;
 	unsigned int rx_ps_pages;
-	uint32_t gorcl;
-	uint64_t gorcl_old;
-	uint16_t rx_ps_bsize0;
+	u32 gorcl;
+	u64 gorcl_old;
+	u16 rx_ps_bsize0;
 
 
 	/* OS defined structs */
@@ -330,19 +329,19 @@ struct e1000_adapter {
 	struct e1000_phy_info phy_info;
 	struct e1000_phy_stats phy_stats;
 
-	uint32_t test_icr;
+	u32 test_icr;
 	struct e1000_tx_ring test_tx_ring;
 	struct e1000_rx_ring test_rx_ring;
 
 	int msg_enable;
-	boolean_t have_msi;
+	bool have_msi;
 
 	/* to not mess up cache alignment, always add to the bottom */
-	boolean_t tso_force;
-	boolean_t smart_power_down;	/* phy smart power down */
-	boolean_t quad_port_a;
+	bool tso_force;
+	bool smart_power_down;	/* phy smart power down */
+	bool quad_port_a;
 	unsigned long flags;
-	uint32_t eeprom_wol;
+	u32 eeprom_wol;
 };
 
 enum e1000_state_t {

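The e1000.h hunks above swap the userspace-style fixed-width typedefs (uint8_t/uint16_t/uint32_t/uint64_t) and the driver-private boolean_t for the kernel's native u8/u16/u32/u64 and bool. As a minimal, purely illustrative sketch of that mapping (the struct and field names below are hypothetical, not taken from the driver; only <linux/types.h> is assumed):

    #include <linux/types.h>        /* kernel u8/u16/u32/u64, s32 and bool */

    struct example_ring {
            u16  head;              /* was: uint16_t */
            u16  tail;              /* was: uint16_t */
            u64  byte_count;        /* was: uint64_t */
            bool csum_enabled;      /* was: boolean_t; assigned true/false, not TRUE/FALSE */
    };

The widths are identical before and after, so struct layout and DMA-visible behaviour are unchanged; only the spelling of the types moves to the in-kernel convention.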
+ 88 - 87
drivers/net/e1000/e1000_ethtool.c

@@ -36,7 +36,7 @@ extern int e1000_up(struct e1000_adapter *adapter);
 extern void e1000_down(struct e1000_adapter *adapter);
 extern void e1000_reinit_locked(struct e1000_adapter *adapter);
 extern void e1000_reset(struct e1000_adapter *adapter);
-extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
+extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
 extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
 extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
 extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
@@ -289,7 +289,7 @@ e1000_set_pauseparam(struct net_device *netdev,
 	return retval;
 }
 
-static uint32_t
+static u32
 e1000_get_rx_csum(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -297,7 +297,7 @@ e1000_get_rx_csum(struct net_device *netdev)
 }
 
 static int
-e1000_set_rx_csum(struct net_device *netdev, uint32_t data)
+e1000_set_rx_csum(struct net_device *netdev, u32 data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	adapter->rx_csum = data;
@@ -309,14 +309,14 @@ e1000_set_rx_csum(struct net_device *netdev, uint32_t data)
 	return 0;
 }
 
-static uint32_t
+static u32
 e1000_get_tx_csum(struct net_device *netdev)
 {
 	return (netdev->features & NETIF_F_HW_CSUM) != 0;
 }
 
 static int
-e1000_set_tx_csum(struct net_device *netdev, uint32_t data)
+e1000_set_tx_csum(struct net_device *netdev, u32 data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 
@@ -335,7 +335,7 @@ e1000_set_tx_csum(struct net_device *netdev, uint32_t data)
 }
 
 static int
-e1000_set_tso(struct net_device *netdev, uint32_t data)
+e1000_set_tso(struct net_device *netdev, u32 data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	if ((adapter->hw.mac_type < e1000_82544) ||
@@ -353,11 +353,11 @@ e1000_set_tso(struct net_device *netdev, uint32_t data)
 		netdev->features &= ~NETIF_F_TSO6;
 
 	DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
-	adapter->tso_force = TRUE;
+	adapter->tso_force = true;
 	return 0;
 }
 
-static uint32_t
+static u32
 e1000_get_msglevel(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -365,7 +365,7 @@ e1000_get_msglevel(struct net_device *netdev)
 }
 
 static void
-e1000_set_msglevel(struct net_device *netdev, uint32_t data)
+e1000_set_msglevel(struct net_device *netdev, u32 data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	adapter->msg_enable = data;
@@ -375,7 +375,7 @@ static int
 e1000_get_regs_len(struct net_device *netdev)
 {
 #define E1000_REGS_LEN 32
-	return E1000_REGS_LEN * sizeof(uint32_t);
+	return E1000_REGS_LEN * sizeof(u32);
 }
 
 static void
@@ -384,10 +384,10 @@ e1000_get_regs(struct net_device *netdev,
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	uint32_t *regs_buff = p;
-	uint16_t phy_data;
+	u32 *regs_buff = p;
+	u16 phy_data;
 
-	memset(p, 0, E1000_REGS_LEN * sizeof(uint32_t));
+	memset(p, 0, E1000_REGS_LEN * sizeof(u32));
 
 	regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
 
@@ -412,44 +412,44 @@ e1000_get_regs(struct net_device *netdev,
 				    IGP01E1000_PHY_AGC_A);
 		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A &
 				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
-		regs_buff[13] = (uint32_t)phy_data; /* cable length */
+		regs_buff[13] = (u32)phy_data; /* cable length */
 		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
 				    IGP01E1000_PHY_AGC_B);
 		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B &
 				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
-		regs_buff[14] = (uint32_t)phy_data; /* cable length */
+		regs_buff[14] = (u32)phy_data; /* cable length */
 		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
 				    IGP01E1000_PHY_AGC_C);
 		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C &
 				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
-		regs_buff[15] = (uint32_t)phy_data; /* cable length */
+		regs_buff[15] = (u32)phy_data; /* cable length */
 		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
 				    IGP01E1000_PHY_AGC_D);
 		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D &
 				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
-		regs_buff[16] = (uint32_t)phy_data; /* cable length */
+		regs_buff[16] = (u32)phy_data; /* cable length */
 		regs_buff[17] = 0; /* extended 10bt distance (not needed) */
 		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
 		e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS &
 				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
-		regs_buff[18] = (uint32_t)phy_data; /* cable polarity */
+		regs_buff[18] = (u32)phy_data; /* cable polarity */
 		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
 				    IGP01E1000_PHY_PCS_INIT_REG);
 		e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG &
 				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
-		regs_buff[19] = (uint32_t)phy_data; /* cable polarity */
+		regs_buff[19] = (u32)phy_data; /* cable polarity */
 		regs_buff[20] = 0; /* polarity correction enabled (always) */
 		regs_buff[22] = 0; /* phy receive errors (unavailable) */
 		regs_buff[23] = regs_buff[18]; /* mdix mode */
 		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
 	} else {
 		e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
-		regs_buff[13] = (uint32_t)phy_data; /* cable length */
+		regs_buff[13] = (u32)phy_data; /* cable length */
 		regs_buff[14] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
 		regs_buff[15] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
 		regs_buff[16] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
 		e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
-		regs_buff[17] = (uint32_t)phy_data; /* extended 10bt distance */
+		regs_buff[17] = (u32)phy_data; /* extended 10bt distance */
 		regs_buff[18] = regs_buff[13]; /* cable polarity */
 		regs_buff[19] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
 		regs_buff[20] = regs_buff[17]; /* polarity correction */
@@ -459,7 +459,7 @@ e1000_get_regs(struct net_device *netdev,
 	}
 	regs_buff[21] = adapter->phy_stats.idle_errors;  /* phy idle errors */
 	e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
-	regs_buff[24] = (uint32_t)phy_data;  /* phy local receiver status */
+	regs_buff[24] = (u32)phy_data;  /* phy local receiver status */
 	regs_buff[25] = regs_buff[24];  /* phy remote receiver status */
 	if (hw->mac_type >= e1000_82540 &&
 	    hw->mac_type < e1000_82571 &&
@@ -477,14 +477,14 @@ e1000_get_eeprom_len(struct net_device *netdev)
 
 static int
 e1000_get_eeprom(struct net_device *netdev,
-                      struct ethtool_eeprom *eeprom, uint8_t *bytes)
+                      struct ethtool_eeprom *eeprom, u8 *bytes)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	uint16_t *eeprom_buff;
+	u16 *eeprom_buff;
 	int first_word, last_word;
 	int ret_val = 0;
-	uint16_t i;
+	u16 i;
 
 	if (eeprom->len == 0)
 		return -EINVAL;
@@ -494,7 +494,7 @@ e1000_get_eeprom(struct net_device *netdev,
 	first_word = eeprom->offset >> 1;
 	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
 
-	eeprom_buff = kmalloc(sizeof(uint16_t) *
+	eeprom_buff = kmalloc(sizeof(u16) *
 			(last_word - first_word + 1), GFP_KERNEL);
 	if (!eeprom_buff)
 		return -ENOMEM;
@@ -514,7 +514,7 @@ e1000_get_eeprom(struct net_device *netdev,
 	for (i = 0; i < last_word - first_word + 1; i++)
 		le16_to_cpus(&eeprom_buff[i]);
 
-	memcpy(bytes, (uint8_t *)eeprom_buff + (eeprom->offset & 1),
+	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
 			eeprom->len);
 	kfree(eeprom_buff);
 
@@ -523,14 +523,14 @@ e1000_get_eeprom(struct net_device *netdev,
 
 static int
 e1000_set_eeprom(struct net_device *netdev,
-                      struct ethtool_eeprom *eeprom, uint8_t *bytes)
+                      struct ethtool_eeprom *eeprom, u8 *bytes)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	uint16_t *eeprom_buff;
+	u16 *eeprom_buff;
 	void *ptr;
 	int max_len, first_word, last_word, ret_val = 0;
-	uint16_t i;
+	u16 i;
 
 	if (eeprom->len == 0)
 		return -EOPNOTSUPP;
@@ -590,7 +590,7 @@ e1000_get_drvinfo(struct net_device *netdev,
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	char firmware_version[32];
-	uint16_t eeprom_data;
+	u16 eeprom_data;
 
 	strncpy(drvinfo->driver,  e1000_driver_name, 32);
 	strncpy(drvinfo->version, e1000_driver_version, 32);
@@ -674,13 +674,13 @@ e1000_set_ringparam(struct net_device *netdev,
 	adapter->tx_ring = txdr;
 	adapter->rx_ring = rxdr;
 
-	rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD);
-	rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ?
+	rxdr->count = max(ring->rx_pending,(u32)E1000_MIN_RXD);
+	rxdr->count = min(rxdr->count,(u32)(mac_type < e1000_82544 ?
 		E1000_MAX_RXD : E1000_MAX_82544_RXD));
 	rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
 
-	txdr->count = max(ring->tx_pending,(uint32_t)E1000_MIN_TXD);
-	txdr->count = min(txdr->count,(uint32_t)(mac_type < e1000_82544 ?
+	txdr->count = max(ring->tx_pending,(u32)E1000_MIN_TXD);
+	txdr->count = min(txdr->count,(u32)(mac_type < e1000_82544 ?
 		E1000_MAX_TXD : E1000_MAX_82544_TXD));
 	txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
 
@@ -728,13 +728,13 @@ err_setup:
 	return err;
 }
 
-static bool reg_pattern_test(struct e1000_adapter *adapter, uint64_t *data,
-			     int reg, uint32_t mask, uint32_t write)
+static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
+			     int reg, u32 mask, u32 write)
 {
-	static const uint32_t test[] =
+	static const u32 test[] =
 		{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
-	uint8_t __iomem *address = adapter->hw.hw_addr + reg;
-	uint32_t read;
+	u8 __iomem *address = adapter->hw.hw_addr + reg;
+	u32 read;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(test); i++) {
@@ -751,11 +751,11 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, uint64_t *data,
 	return false;
 }
 
-static bool reg_set_and_check(struct e1000_adapter *adapter, uint64_t *data,
-			      int reg, uint32_t mask, uint32_t write)
+static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
+			      int reg, u32 mask, u32 write)
 {
-	uint8_t __iomem *address = adapter->hw.hw_addr + reg;
-	uint32_t read;
+	u8 __iomem *address = adapter->hw.hw_addr + reg;
+	u32 read;
 
 	writel(write & mask, address);
 	read = readl(address);
@@ -788,10 +788,10 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, uint64_t *data,
 	} while (0)
 
 static int
-e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
+e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
 {
-	uint32_t value, before, after;
-	uint32_t i, toggle;
+	u32 value, before, after;
+	u32 i, toggle;
 
 	/* The status register is Read Only, so a write should fail.
 	 * Some bits that get toggled are ignored.
@@ -884,11 +884,11 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
 }
 
 static int
-e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
+e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
 {
-	uint16_t temp;
-	uint16_t checksum = 0;
-	uint16_t i;
+	u16 temp;
+	u16 checksum = 0;
+	u16 i;
 
 	*data = 0;
 	/* Read and add up the contents of the EEPROM */
@@ -901,7 +901,7 @@ e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
 	}
 
 	/* If Checksum is not Correct return error else test passed */
-	if ((checksum != (uint16_t) EEPROM_SUM) && !(*data))
+	if ((checksum != (u16) EEPROM_SUM) && !(*data))
 		*data = 2;
 
 	return *data;
@@ -919,11 +919,12 @@ e1000_test_intr(int irq, void *data)
 }
 
 static int
-e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
+e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
 {
 	struct net_device *netdev = adapter->netdev;
-	uint32_t mask, i=0, shared_int = TRUE;
-	uint32_t irq = adapter->pdev->irq;
+	u32 mask, i = 0;
+	bool shared_int = true;
+	u32 irq = adapter->pdev->irq;
 
 	*data = 0;
 
@@ -931,7 +932,7 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
 	/* Hook up test interrupt handler just for this test */
 	if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
 	                 netdev))
-		shared_int = FALSE;
+		shared_int = false;
 	else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED,
 	         netdev->name, netdev)) {
 		*data = 1;
@@ -1069,7 +1070,7 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
 	struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
 	struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
 	struct pci_dev *pdev = adapter->pdev;
-	uint32_t rctl;
+	u32 rctl;
 	int i, ret_val;
 
 	/* Setup Tx descriptor ring and Tx buffers */
@@ -1095,8 +1096,8 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
 	txdr->next_to_use = txdr->next_to_clean = 0;
 
 	E1000_WRITE_REG(&adapter->hw, TDBAL,
-			((uint64_t) txdr->dma & 0x00000000FFFFFFFF));
-	E1000_WRITE_REG(&adapter->hw, TDBAH, ((uint64_t) txdr->dma >> 32));
+			((u64) txdr->dma & 0x00000000FFFFFFFF));
+	E1000_WRITE_REG(&adapter->hw, TDBAH, ((u64) txdr->dma >> 32));
 	E1000_WRITE_REG(&adapter->hw, TDLEN,
 			txdr->count * sizeof(struct e1000_tx_desc));
 	E1000_WRITE_REG(&adapter->hw, TDH, 0);
@@ -1152,8 +1153,8 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
 	rctl = E1000_READ_REG(&adapter->hw, RCTL);
 	E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN);
 	E1000_WRITE_REG(&adapter->hw, RDBAL,
-			((uint64_t) rxdr->dma & 0xFFFFFFFF));
-	E1000_WRITE_REG(&adapter->hw, RDBAH, ((uint64_t) rxdr->dma >> 32));
+			((u64) rxdr->dma & 0xFFFFFFFF));
+	E1000_WRITE_REG(&adapter->hw, RDBAH, ((u64) rxdr->dma >> 32));
 	E1000_WRITE_REG(&adapter->hw, RDLEN, rxdr->size);
 	E1000_WRITE_REG(&adapter->hw, RDH, 0);
 	E1000_WRITE_REG(&adapter->hw, RDT, 0);
@@ -1201,7 +1202,7 @@ e1000_phy_disable_receiver(struct e1000_adapter *adapter)
 static void
 e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter)
 {
-	uint16_t phy_reg;
+	u16 phy_reg;
 
 	/* Because we reset the PHY above, we need to re-force TX_CLK in the
 	 * Extended PHY Specific Control Register to 25MHz clock.  This
@@ -1225,8 +1226,8 @@ e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter)
 static int
 e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
 {
-	uint32_t ctrl_reg;
-	uint16_t phy_reg;
+	u32 ctrl_reg;
+	u16 phy_reg;
 
 	/* Setup the Device Control Register for PHY loopback test. */
 
@@ -1292,10 +1293,10 @@ e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
 static int
 e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 {
-	uint32_t ctrl_reg = 0;
-	uint32_t stat_reg = 0;
+	u32 ctrl_reg = 0;
+	u32 stat_reg = 0;
 
-	adapter->hw.autoneg = FALSE;
+	adapter->hw.autoneg = false;
 
 	if (adapter->hw.phy_type == e1000_phy_m88) {
 		/* Auto-MDI/MDIX Off */
@@ -1362,8 +1363,8 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 static int
 e1000_set_phy_loopback(struct e1000_adapter *adapter)
 {
-	uint16_t phy_reg = 0;
-	uint16_t count = 0;
+	u16 phy_reg = 0;
+	u16 count = 0;
 
 	switch (adapter->hw.mac_type) {
 	case e1000_82543:
@@ -1415,7 +1416,7 @@ static int
 e1000_setup_loopback_test(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
-	uint32_t rctl;
+	u32 rctl;
 
 	if (hw->media_type == e1000_media_type_fiber ||
 	    hw->media_type == e1000_media_type_internal_serdes) {
@@ -1450,8 +1451,8 @@ static void
 e1000_loopback_cleanup(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
-	uint32_t rctl;
-	uint16_t phy_reg;
+	u32 rctl;
+	u16 phy_reg;
 
 	rctl = E1000_READ_REG(hw, RCTL);
 	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
@@ -1473,7 +1474,7 @@ e1000_loopback_cleanup(struct e1000_adapter *adapter)
 	case e1000_82545_rev_3:
 	case e1000_82546_rev_3:
 	default:
-		hw->autoneg = TRUE;
+		hw->autoneg = true;
 		if (hw->phy_type == e1000_phy_gg82563)
 			e1000_write_phy_reg(hw,
 					    GG82563_PHY_KMRN_MODE_CTRL,
@@ -1577,7 +1578,7 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
 }
 
 static int
-e1000_loopback_test(struct e1000_adapter *adapter, uint64_t *data)
+e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
 {
 	/* PHY loopback cannot be performed if SoL/IDER
 	 * sessions are active */
@@ -1602,18 +1603,18 @@ out:
 }
 
 static int
-e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
+e1000_link_test(struct e1000_adapter *adapter, u64 *data)
 {
 	*data = 0;
 	if (adapter->hw.media_type == e1000_media_type_internal_serdes) {
 		int i = 0;
-		adapter->hw.serdes_link_down = TRUE;
+		adapter->hw.serdes_link_down = true;
 
 		/* On some blade server designs, link establishment
 		 * could take as long as 2-3 minutes */
 		do {
 			e1000_check_for_link(&adapter->hw);
-			if (adapter->hw.serdes_link_down == FALSE)
+			if (!adapter->hw.serdes_link_down)
 				return *data;
 			msleep(20);
 		} while (i++ < 3750);
@@ -1646,19 +1647,19 @@ e1000_get_sset_count(struct net_device *netdev, int sset)
 
 static void
 e1000_diag_test(struct net_device *netdev,
-		   struct ethtool_test *eth_test, uint64_t *data)
+		   struct ethtool_test *eth_test, u64 *data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	boolean_t if_running = netif_running(netdev);
+	bool if_running = netif_running(netdev);
 
 	set_bit(__E1000_TESTING, &adapter->flags);
 	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
 		/* Offline tests */
 
 		/* save speed, duplex, autoneg settings */
-		uint16_t autoneg_advertised = adapter->hw.autoneg_advertised;
-		uint8_t forced_speed_duplex = adapter->hw.forced_speed_duplex;
-		uint8_t autoneg = adapter->hw.autoneg;
+		u16 autoneg_advertised = adapter->hw.autoneg_advertised;
+		u8 forced_speed_duplex = adapter->hw.forced_speed_duplex;
+		u8 autoneg = adapter->hw.autoneg;
 
 		DPRINTK(HW, INFO, "offline testing starting\n");
 
@@ -1876,7 +1877,7 @@ e1000_led_blink_callback(unsigned long data)
 }
 
 static int
-e1000_phys_id(struct net_device *netdev, uint32_t data)
+e1000_phys_id(struct net_device *netdev, u32 data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 
@@ -1926,7 +1927,7 @@ e1000_nway_reset(struct net_device *netdev)
 
 static void
 e1000_get_ethtool_stats(struct net_device *netdev,
-		struct ethtool_stats *stats, uint64_t *data)
+		struct ethtool_stats *stats, u64 *data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	int i;
@@ -1935,15 +1936,15 @@ e1000_get_ethtool_stats(struct net_device *netdev,
 	for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
 		char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;
 		data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
-			sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
+			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
 /*	BUG_ON(i != E1000_STATS_LEN); */
 }
 
 static void
-e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
+e1000_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 {
-	uint8_t *p = data;
+	u8 *p = data;
 	int i;
 
 	switch (stringset) {

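The e1000_ethtool.c hunks apply the same pattern to function signatures: ethtool callbacks and self-test helpers now take u32/u64 arguments, integer TRUE/FALSE flags such as shared_int become bool true/false, and 16-bit PHY reads are widened with a (u32) cast when stored into the register dump. A minimal sketch of that widening step, with a hypothetical helper name that is not part of the commit:

    #include <linux/types.h>

    /* Zero-extend a 16-bit PHY register value into a 32-bit dump slot,
     * as done for regs_buff[] in the e1000_get_regs() hunks above. */
    static void fill_dump_slot(u32 *regs_buff, int idx, u16 phy_data)
    {
            regs_buff[idx] = (u32)phy_data;
    }

Because u16 to u32 is a widening conversion, the cast is value-preserving and generates no extra code; it only documents the intent.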
+ 770 - 771
drivers/net/e1000/e1000_hw.c

@@ -33,106 +33,107 @@
 
 #include "e1000_hw.h"
 
-static int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask);
-static void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask);
-static int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *data);
-static int32_t e1000_write_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data);
-static int32_t e1000_get_software_semaphore(struct e1000_hw *hw);
+static s32 e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask);
+static void e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask);
+static s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 *data);
+static s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 data);
+static s32 e1000_get_software_semaphore(struct e1000_hw *hw);
 static void e1000_release_software_semaphore(struct e1000_hw *hw);
 
-static uint8_t e1000_arc_subsystem_valid(struct e1000_hw *hw);
-static int32_t e1000_check_downshift(struct e1000_hw *hw);
-static int32_t e1000_check_polarity(struct e1000_hw *hw, e1000_rev_polarity *polarity);
+static u8 e1000_arc_subsystem_valid(struct e1000_hw *hw);
+static s32 e1000_check_downshift(struct e1000_hw *hw);
+static s32 e1000_check_polarity(struct e1000_hw *hw, e1000_rev_polarity *polarity);
 static void e1000_clear_hw_cntrs(struct e1000_hw *hw);
 static void e1000_clear_vfta(struct e1000_hw *hw);
-static int32_t e1000_commit_shadow_ram(struct e1000_hw *hw);
-static int32_t e1000_config_dsp_after_link_change(struct e1000_hw *hw, boolean_t link_up);
-static int32_t e1000_config_fc_after_link_up(struct e1000_hw *hw);
-static int32_t e1000_detect_gig_phy(struct e1000_hw *hw);
-static int32_t e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t bank);
-static int32_t e1000_get_auto_rd_done(struct e1000_hw *hw);
-static int32_t e1000_get_cable_length(struct e1000_hw *hw, uint16_t *min_length, uint16_t *max_length);
-static int32_t e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw);
-static int32_t e1000_get_phy_cfg_done(struct e1000_hw *hw);
-static int32_t e1000_get_software_flag(struct e1000_hw *hw);
-static int32_t e1000_ich8_cycle_init(struct e1000_hw *hw);
-static int32_t e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout);
-static int32_t e1000_id_led_init(struct e1000_hw *hw);
-static int32_t e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, uint32_t cnf_base_addr, uint32_t cnf_size);
-static int32_t e1000_init_lcd_from_nvm(struct e1000_hw *hw);
+static s32 e1000_commit_shadow_ram(struct e1000_hw *hw);
+static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw,
+						  bool link_up);
+static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw);
+static s32 e1000_detect_gig_phy(struct e1000_hw *hw);
+static s32 e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank);
+static s32 e1000_get_auto_rd_done(struct e1000_hw *hw);
+static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, u16 *max_length);
+static s32 e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw);
+static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw);
+static s32 e1000_get_software_flag(struct e1000_hw *hw);
+static s32 e1000_ich8_cycle_init(struct e1000_hw *hw);
+static s32 e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout);
+static s32 e1000_id_led_init(struct e1000_hw *hw);
+static s32 e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, u32 cnf_base_addr, u32 cnf_size);
+static s32 e1000_init_lcd_from_nvm(struct e1000_hw *hw);
 static void e1000_init_rx_addrs(struct e1000_hw *hw);
 static void e1000_initialize_hardware_bits(struct e1000_hw *hw);
-static boolean_t e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw);
-static int32_t e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw);
-static int32_t e1000_mng_enable_host_if(struct e1000_hw *hw);
-static int32_t e1000_mng_host_if_write(struct e1000_hw *hw, uint8_t *buffer, uint16_t length, uint16_t offset, uint8_t *sum);
-static int32_t e1000_mng_write_cmd_header(struct e1000_hw* hw, struct e1000_host_mng_command_header* hdr);
-static int32_t e1000_mng_write_commit(struct e1000_hw *hw);
-static int32_t e1000_phy_ife_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
-static int32_t e1000_phy_igp_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
-static int32_t e1000_read_eeprom_eerd(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data);
-static int32_t e1000_write_eeprom_eewr(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data);
-static int32_t e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd);
-static int32_t e1000_phy_m88_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
+static bool e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw);
+static s32 e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw);
+static s32 e1000_mng_enable_host_if(struct e1000_hw *hw);
+static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length, u16 offset, u8 *sum);
+static s32 e1000_mng_write_cmd_header(struct e1000_hw* hw, struct e1000_host_mng_command_header* hdr);
+static s32 e1000_mng_write_commit(struct e1000_hw *hw);
+static s32 e1000_phy_ife_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
+static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
+static s32 e1000_read_eeprom_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+static s32 e1000_write_eeprom_eewr(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd);
+static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
 static void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw);
-static int32_t e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t *data);
-static int32_t e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte);
-static int32_t e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte);
-static int32_t e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data);
-static int32_t e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size, uint16_t *data);
-static int32_t e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size, uint16_t data);
-static int32_t e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data);
-static int32_t e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data);
+static s32 e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8 *data);
+static s32 e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte);
+static s32 e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte);
+static s32 e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data);
+static s32 e1000_read_ich8_data(struct e1000_hw *hw, u32 index, u32 size, u16 *data);
+static s32 e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size, u16 data);
+static s32 e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+static s32 e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
 static void e1000_release_software_flag(struct e1000_hw *hw);
-static int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, boolean_t active);
-static int32_t e1000_set_d0_lplu_state(struct e1000_hw *hw, boolean_t active);
-static int32_t e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop);
+static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
+static s32 e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop);
 static void e1000_set_pci_express_master_disable(struct e1000_hw *hw);
-static int32_t e1000_wait_autoneg(struct e1000_hw *hw);
-static void e1000_write_reg_io(struct e1000_hw *hw, uint32_t offset, uint32_t value);
-static int32_t e1000_set_phy_type(struct e1000_hw *hw);
+static s32 e1000_wait_autoneg(struct e1000_hw *hw);
+static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value);
+static s32 e1000_set_phy_type(struct e1000_hw *hw);
 static void e1000_phy_init_script(struct e1000_hw *hw);
-static int32_t e1000_setup_copper_link(struct e1000_hw *hw);
-static int32_t e1000_setup_fiber_serdes_link(struct e1000_hw *hw);
-static int32_t e1000_adjust_serdes_amplitude(struct e1000_hw *hw);
-static int32_t e1000_phy_force_speed_duplex(struct e1000_hw *hw);
-static int32_t e1000_config_mac_to_phy(struct e1000_hw *hw);
-static void e1000_raise_mdi_clk(struct e1000_hw *hw, uint32_t *ctrl);
-static void e1000_lower_mdi_clk(struct e1000_hw *hw, uint32_t *ctrl);
-static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, uint32_t data,
-                                     uint16_t count);
-static uint16_t e1000_shift_in_mdi_bits(struct e1000_hw *hw);
-static int32_t e1000_phy_reset_dsp(struct e1000_hw *hw);
-static int32_t e1000_write_eeprom_spi(struct e1000_hw *hw, uint16_t offset,
-                                      uint16_t words, uint16_t *data);
-static int32_t e1000_write_eeprom_microwire(struct e1000_hw *hw,
-                                            uint16_t offset, uint16_t words,
-                                            uint16_t *data);
-static int32_t e1000_spi_eeprom_ready(struct e1000_hw *hw);
-static void e1000_raise_ee_clk(struct e1000_hw *hw, uint32_t *eecd);
-static void e1000_lower_ee_clk(struct e1000_hw *hw, uint32_t *eecd);
-static void e1000_shift_out_ee_bits(struct e1000_hw *hw, uint16_t data,
-                                    uint16_t count);
-static int32_t e1000_write_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr,
-                                      uint16_t phy_data);
-static int32_t e1000_read_phy_reg_ex(struct e1000_hw *hw,uint32_t reg_addr,
-                                     uint16_t *phy_data);
-static uint16_t e1000_shift_in_ee_bits(struct e1000_hw *hw, uint16_t count);
-static int32_t e1000_acquire_eeprom(struct e1000_hw *hw);
+static s32 e1000_setup_copper_link(struct e1000_hw *hw);
+static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw);
+static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw);
+static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw);
+static s32 e1000_config_mac_to_phy(struct e1000_hw *hw);
+static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl);
+static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl);
+static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data,
+                                     u16 count);
+static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw);
+static s32 e1000_phy_reset_dsp(struct e1000_hw *hw);
+static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset,
+                                      u16 words, u16 *data);
+static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw,
+                                            u16 offset, u16 words,
+                                            u16 *data);
+static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw);
+static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd);
+static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd);
+static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data,
+                                    u16 count);
+static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+                                      u16 phy_data);
+static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw,u32 reg_addr,
+                                     u16 *phy_data);
+static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count);
+static s32 e1000_acquire_eeprom(struct e1000_hw *hw);
 static void e1000_release_eeprom(struct e1000_hw *hw);
 static void e1000_standby_eeprom(struct e1000_hw *hw);
-static int32_t e1000_set_vco_speed(struct e1000_hw *hw);
-static int32_t e1000_polarity_reversal_workaround(struct e1000_hw *hw);
-static int32_t e1000_set_phy_mode(struct e1000_hw *hw);
-static int32_t e1000_host_if_read_cookie(struct e1000_hw *hw, uint8_t *buffer);
-static uint8_t e1000_calculate_mng_checksum(char *buffer, uint32_t length);
-static int32_t e1000_configure_kmrn_for_10_100(struct e1000_hw *hw,
-                                               uint16_t duplex);
-static int32_t e1000_configure_kmrn_for_1000(struct e1000_hw *hw);
+static s32 e1000_set_vco_speed(struct e1000_hw *hw);
+static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw);
+static s32 e1000_set_phy_mode(struct e1000_hw *hw);
+static s32 e1000_host_if_read_cookie(struct e1000_hw *hw, u8 *buffer);
+static u8 e1000_calculate_mng_checksum(char *buffer, u32 length);
+static s32 e1000_configure_kmrn_for_10_100(struct e1000_hw *hw,
+                                               u16 duplex);
+static s32 e1000_configure_kmrn_for_1000(struct e1000_hw *hw);
 
 /* IGP cable length table */
 static const
-uint16_t e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] =
+u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] =
     { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
       5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25,
       25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40,
@@ -143,7 +144,7 @@ uint16_t e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] =
       110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120};
 
 static const
-uint16_t e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] =
+u16 e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] =
     { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
       0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
       6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
@@ -158,7 +159,7 @@ uint16_t e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] =
  *
  * hw - Struct containing variables accessed by shared code
  *****************************************************************************/
-static int32_t
+static s32
 e1000_set_phy_type(struct e1000_hw *hw)
 {
     DEBUGFUNC("e1000_set_phy_type");
@@ -212,8 +213,8 @@ e1000_set_phy_type(struct e1000_hw *hw)
 static void
 e1000_phy_init_script(struct e1000_hw *hw)
 {
-    uint32_t ret_val;
-    uint16_t phy_saved_data;
+    u32 ret_val;
+    u16 phy_saved_data;
 
     DEBUGFUNC("e1000_phy_init_script");
 
@@ -271,7 +272,7 @@ e1000_phy_init_script(struct e1000_hw *hw)
         e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
 
         if (hw->mac_type == e1000_82547) {
-            uint16_t fused, fine, coarse;
+            u16 fused, fine, coarse;
 
             /* Move to analog registers page */
             e1000_read_phy_reg(hw, IGP01E1000_ANALOG_SPARE_FUSE_STATUS, &fused);
@@ -305,7 +306,7 @@ e1000_phy_init_script(struct e1000_hw *hw)
  *
  * hw - Struct containing variables accessed by shared code
  *****************************************************************************/
-int32_t
+s32
 e1000_set_mac_type(struct e1000_hw *hw)
 {
 	DEBUGFUNC("e1000_set_mac_type");
@@ -425,22 +426,22 @@ e1000_set_mac_type(struct e1000_hw *hw)
 
 	switch (hw->mac_type) {
 	case e1000_ich8lan:
-		hw->swfwhw_semaphore_present = TRUE;
-		hw->asf_firmware_present = TRUE;
+		hw->swfwhw_semaphore_present = true;
+		hw->asf_firmware_present = true;
 		break;
 	case e1000_80003es2lan:
-		hw->swfw_sync_present = TRUE;
+		hw->swfw_sync_present = true;
 		/* fall through */
 	case e1000_82571:
 	case e1000_82572:
 	case e1000_82573:
-		hw->eeprom_semaphore_present = TRUE;
+		hw->eeprom_semaphore_present = true;
 		/* fall through */
 	case e1000_82541:
 	case e1000_82547:
 	case e1000_82541_rev_2:
 	case e1000_82547_rev_2:
-		hw->asf_firmware_present = TRUE;
+		hw->asf_firmware_present = true;
 		break;
 	default:
 		break;
@@ -450,20 +451,20 @@ e1000_set_mac_type(struct e1000_hw *hw)
 	 * FD mode
 	 */
 	if (hw->mac_type == e1000_82543)
-		hw->bad_tx_carr_stats_fd = TRUE;
+		hw->bad_tx_carr_stats_fd = true;
 
 	/* capable of receiving management packets to the host */
 	if (hw->mac_type >= e1000_82571)
-		hw->has_manc2h = TRUE;
+		hw->has_manc2h = true;
 
 	/* In rare occasions, ESB2 systems would end up started without
 	 * the RX unit being turned on.
 	 */
 	if (hw->mac_type == e1000_80003es2lan)
-		hw->rx_needs_kicking = TRUE;
+		hw->rx_needs_kicking = true;
 
 	if (hw->mac_type > e1000_82544)
-		hw->has_smbus = TRUE;
+		hw->has_smbus = true;
 
 	return E1000_SUCCESS;
 }
@@ -476,13 +477,13 @@ e1000_set_mac_type(struct e1000_hw *hw)
 void
 e1000_set_media_type(struct e1000_hw *hw)
 {
-    uint32_t status;
+    u32 status;
 
     DEBUGFUNC("e1000_set_media_type");
 
     if (hw->mac_type != e1000_82543) {
         /* tbi_compatibility is only valid on 82543 */
-        hw->tbi_compatibility_en = FALSE;
+        hw->tbi_compatibility_en = false;
     }
 
     switch (hw->device_id) {
@@ -513,7 +514,7 @@ e1000_set_media_type(struct e1000_hw *hw)
             if (status & E1000_STATUS_TBIMODE) {
                 hw->media_type = e1000_media_type_fiber;
                 /* tbi_compatibility not valid on fiber */
-                hw->tbi_compatibility_en = FALSE;
+                hw->tbi_compatibility_en = false;
             } else {
                 hw->media_type = e1000_media_type_copper;
             }
@@ -527,17 +528,17 @@ e1000_set_media_type(struct e1000_hw *hw)
  *
  * hw - Struct containing variables accessed by shared code
  *****************************************************************************/
-int32_t
+s32
 e1000_reset_hw(struct e1000_hw *hw)
 {
-    uint32_t ctrl;
-    uint32_t ctrl_ext;
-    uint32_t icr;
-    uint32_t manc;
-    uint32_t led_ctrl;
-    uint32_t timeout;
-    uint32_t extcnf_ctrl;
-    int32_t ret_val;
+    u32 ctrl;
+    u32 ctrl_ext;
+    u32 icr;
+    u32 manc;
+    u32 led_ctrl;
+    u32 timeout;
+    u32 extcnf_ctrl;
+    s32 ret_val;
 
     DEBUGFUNC("e1000_reset_hw");
 
@@ -569,7 +570,7 @@ e1000_reset_hw(struct e1000_hw *hw)
     E1000_WRITE_FLUSH(hw);
 
     /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */
-    hw->tbi_compatibility_on = FALSE;
+    hw->tbi_compatibility_on = false;
 
     /* Delay to allow any outstanding PCI transactions to complete before
      * resetting the device
@@ -682,7 +683,7 @@ e1000_reset_hw(struct e1000_hw *hw)
             msleep(20);
             break;
         case e1000_82573:
-            if (e1000_is_onboard_nvm_eeprom(hw) == FALSE) {
+            if (!e1000_is_onboard_nvm_eeprom(hw)) {
                 udelay(10);
                 ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
                 ctrl_ext |= E1000_CTRL_EXT_EE_RST;
@@ -729,7 +730,7 @@ e1000_reset_hw(struct e1000_hw *hw)
     }
 
     if (hw->mac_type == e1000_ich8lan) {
-        uint32_t kab = E1000_READ_REG(hw, KABGTXD);
+        u32 kab = E1000_READ_REG(hw, KABGTXD);
         kab |= E1000_KABGTXD_BGSQLBIAS;
         E1000_WRITE_REG(hw, KABGTXD, kab);
     }
@@ -751,10 +752,10 @@ e1000_initialize_hardware_bits(struct e1000_hw *hw)
 {
     if ((hw->mac_type >= e1000_82571) && (!hw->initialize_hw_bits_disable)) {
         /* Settings common to all PCI-express silicon */
-        uint32_t reg_ctrl, reg_ctrl_ext;
-        uint32_t reg_tarc0, reg_tarc1;
-        uint32_t reg_tctl;
-        uint32_t reg_txdctl, reg_txdctl1;
+        u32 reg_ctrl, reg_ctrl_ext;
+        u32 reg_tarc0, reg_tarc1;
+        u32 reg_tctl;
+        u32 reg_txdctl, reg_txdctl1;
 
         /* link autonegotiation/sync workarounds */
         reg_tarc0 = E1000_READ_REG(hw, TARC0);
@@ -865,15 +866,15 @@ e1000_initialize_hardware_bits(struct e1000_hw *hw)
  * configuration and flow control settings. Clears all on-chip counters. Leaves
  * the transmit and receive units disabled and uninitialized.
  *****************************************************************************/
-int32_t
+s32
 e1000_init_hw(struct e1000_hw *hw)
 {
-    uint32_t ctrl;
-    uint32_t i;
-    int32_t ret_val;
-    uint32_t mta_size;
-    uint32_t reg_data;
-    uint32_t ctrl_ext;
+    u32 ctrl;
+    u32 i;
+    s32 ret_val;
+    u32 mta_size;
+    u32 reg_data;
+    u32 ctrl_ext;
 
     DEBUGFUNC("e1000_init_hw");
 
@@ -1019,7 +1020,7 @@ e1000_init_hw(struct e1000_hw *hw)
 
 
     if (hw->mac_type == e1000_82573) {
-        uint32_t gcr = E1000_READ_REG(hw, GCR);
+        u32 gcr = E1000_READ_REG(hw, GCR);
         gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
         E1000_WRITE_REG(hw, GCR, gcr);
     }
@@ -1053,11 +1054,11 @@ e1000_init_hw(struct e1000_hw *hw)
  *
  * hw - Struct containing variables accessed by shared code.
  *****************************************************************************/
-static int32_t
+static s32
 e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
 {
-    uint16_t eeprom_data;
-    int32_t  ret_val;
+    u16 eeprom_data;
+    s32  ret_val;
 
     DEBUGFUNC("e1000_adjust_serdes_amplitude");
 
@@ -1099,12 +1100,12 @@ e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
  * established. Assumes the hardware has previously been reset and the
  * transmitter and receiver are not enabled.
  *****************************************************************************/
-int32_t
+s32
 e1000_setup_link(struct e1000_hw *hw)
 {
-    uint32_t ctrl_ext;
-    int32_t ret_val;
-    uint16_t eeprom_data;
+    u32 ctrl_ext;
+    s32 ret_val;
+    u16 eeprom_data;
 
     DEBUGFUNC("e1000_setup_link");
 
@@ -1232,15 +1233,15 @@ e1000_setup_link(struct e1000_hw *hw)
  * link. Assumes the hardware has been previously reset and the transmitter
  * and receiver are not enabled.
  *****************************************************************************/
-static int32_t
+static s32
 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
 {
-    uint32_t ctrl;
-    uint32_t status;
-    uint32_t txcw = 0;
-    uint32_t i;
-    uint32_t signal = 0;
-    int32_t ret_val;
+    u32 ctrl;
+    u32 status;
+    u32 txcw = 0;
+    u32 i;
+    u32 signal = 0;
+    s32 ret_val;
 
     DEBUGFUNC("e1000_setup_fiber_serdes_link");
 
@@ -1379,12 +1380,12 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
 *
 * hw - Struct containing variables accessed by shared code
 ******************************************************************************/
-static int32_t
+static s32
 e1000_copper_link_preconfig(struct e1000_hw *hw)
 {
-    uint32_t ctrl;
-    int32_t ret_val;
-    uint16_t phy_data;
+    u32 ctrl;
+    s32 ret_val;
+    u16 phy_data;
 
     DEBUGFUNC("e1000_copper_link_preconfig");
 
@@ -1428,7 +1429,7 @@ e1000_copper_link_preconfig(struct e1000_hw *hw)
     if (hw->mac_type <= e1000_82543 ||
         hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 ||
         hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2)
-        hw->phy_reset_disable = FALSE;
+        hw->phy_reset_disable = false;
 
    return E1000_SUCCESS;
 }
@@ -1439,12 +1440,12 @@ e1000_copper_link_preconfig(struct e1000_hw *hw)
 *
 * hw - Struct containing variables accessed by shared code
 *********************************************************************/
-static int32_t
+static s32
 e1000_copper_link_igp_setup(struct e1000_hw *hw)
 {
-    uint32_t led_ctrl;
-    int32_t ret_val;
-    uint16_t phy_data;
+    u32 led_ctrl;
+    s32 ret_val;
+    u16 phy_data;
 
     DEBUGFUNC("e1000_copper_link_igp_setup");
 
@@ -1470,7 +1471,7 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
     /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */
     if (hw->phy_type == e1000_phy_igp) {
         /* disable lplu d3 during driver init */
-        ret_val = e1000_set_d3_lplu_state(hw, FALSE);
+        ret_val = e1000_set_d3_lplu_state(hw, false);
         if (ret_val) {
             DEBUGOUT("Error Disabling LPLU D3\n");
             return ret_val;
@@ -1478,7 +1479,7 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
     }
 
     /* disable lplu d0 during driver init */
-    ret_val = e1000_set_d0_lplu_state(hw, FALSE);
+    ret_val = e1000_set_d0_lplu_state(hw, false);
     if (ret_val) {
         DEBUGOUT("Error Disabling LPLU D0\n");
         return ret_val;
@@ -1586,12 +1587,12 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
 *
 * hw - Struct containing variables accessed by shared code
 *********************************************************************/
-static int32_t
+static s32
 e1000_copper_link_ggp_setup(struct e1000_hw *hw)
 {
-    int32_t ret_val;
-    uint16_t phy_data;
-    uint32_t reg_data;
+    s32 ret_val;
+    u16 phy_data;
+    u32 reg_data;
 
     DEBUGFUNC("e1000_copper_link_ggp_setup");
 
@@ -1691,7 +1692,7 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
          * firmware will have already initialized them.  We only initialize
          * them if the HW is not in IAMT mode.
          */
-        if (e1000_check_mng_mode(hw) == FALSE) {
+        if (!e1000_check_mng_mode(hw)) {
             /* Enable Electrical Idle on the PHY */
             phy_data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
             ret_val = e1000_write_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL,
@@ -1734,11 +1735,11 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
 *
 * hw - Struct containing variables accessed by shared code
 *********************************************************************/
-static int32_t
+static s32
 e1000_copper_link_mgp_setup(struct e1000_hw *hw)
 {
-    int32_t ret_val;
-    uint16_t phy_data;
+    s32 ret_val;
+    u16 phy_data;
 
     DEBUGFUNC("e1000_copper_link_mgp_setup");
 
@@ -1838,11 +1839,11 @@ e1000_copper_link_mgp_setup(struct e1000_hw *hw)
 *
 * hw - Struct containing variables accessed by shared code
 *********************************************************************/
-static int32_t
+static s32
 e1000_copper_link_autoneg(struct e1000_hw *hw)
 {
-    int32_t ret_val;
-    uint16_t phy_data;
+    s32 ret_val;
+    u16 phy_data;
 
     DEBUGFUNC("e1000_copper_link_autoneg");
 
@@ -1892,7 +1893,7 @@ e1000_copper_link_autoneg(struct e1000_hw *hw)
         }
     }
 
-    hw->get_link_status = TRUE;
+    hw->get_link_status = true;
 
     return E1000_SUCCESS;
 }
@@ -1909,10 +1910,10 @@ e1000_copper_link_autoneg(struct e1000_hw *hw)
 *
 * hw - Struct containing variables accessed by shared code
 ******************************************************************************/
-static int32_t
+static s32
 e1000_copper_link_postconfig(struct e1000_hw *hw)
 {
-    int32_t ret_val;
+    s32 ret_val;
     DEBUGFUNC("e1000_copper_link_postconfig");
 
     if (hw->mac_type >= e1000_82544) {
@@ -1932,7 +1933,7 @@ e1000_copper_link_postconfig(struct e1000_hw *hw)
 
     /* Config DSP to improve Giga link quality */
     if (hw->phy_type == e1000_phy_igp) {
-        ret_val = e1000_config_dsp_after_link_change(hw, TRUE);
+        ret_val = e1000_config_dsp_after_link_change(hw, true);
         if (ret_val) {
             DEBUGOUT("Error Configuring DSP after link up\n");
             return ret_val;
@@ -1947,13 +1948,13 @@ e1000_copper_link_postconfig(struct e1000_hw *hw)
 *
 * hw - Struct containing variables accessed by shared code
 ******************************************************************************/
-static int32_t
+static s32
 e1000_setup_copper_link(struct e1000_hw *hw)
 {
-    int32_t ret_val;
-    uint16_t i;
-    uint16_t phy_data;
-    uint16_t reg_data;
+    s32 ret_val;
+    u16 i;
+    u16 phy_data;
+    u16 reg_data;
 
     DEBUGFUNC("e1000_setup_copper_link");
 
@@ -2061,12 +2062,12 @@ e1000_setup_copper_link(struct e1000_hw *hw)
 *
 * hw - Struct containing variables accessed by shared code
 ******************************************************************************/
-static int32_t
-e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, uint16_t duplex)
+static s32
+e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex)
 {
-    int32_t ret_val = E1000_SUCCESS;
-    uint32_t tipg;
-    uint16_t reg_data;
+    s32 ret_val = E1000_SUCCESS;
+    u32 tipg;
+    u16 reg_data;
 
     DEBUGFUNC("e1000_configure_kmrn_for_10_100");
 
@@ -2097,12 +2098,12 @@ e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, uint16_t duplex)
     return ret_val;
 }
 
-static int32_t
+static s32
 e1000_configure_kmrn_for_1000(struct e1000_hw *hw)
 {
-    int32_t ret_val = E1000_SUCCESS;
-    uint16_t reg_data;
-    uint32_t tipg;
+    s32 ret_val = E1000_SUCCESS;
+    u16 reg_data;
+    u32 tipg;
 
     DEBUGFUNC("e1000_configure_kmrn_for_1000");
 
@@ -2134,12 +2135,12 @@ e1000_configure_kmrn_for_1000(struct e1000_hw *hw)
 *
 * hw - Struct containing variables accessed by shared code
 ******************************************************************************/
-int32_t
+s32
 e1000_phy_setup_autoneg(struct e1000_hw *hw)
 {
-    int32_t ret_val;
-    uint16_t mii_autoneg_adv_reg;
-    uint16_t mii_1000t_ctrl_reg;
+    s32 ret_val;
+    u16 mii_autoneg_adv_reg;
+    u16 mii_1000t_ctrl_reg;
 
     DEBUGFUNC("e1000_phy_setup_autoneg");
 
@@ -2283,15 +2284,15 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
 *
 * hw - Struct containing variables accessed by shared code
 ******************************************************************************/
-static int32_t
+static s32
 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
 {
-    uint32_t ctrl;
-    int32_t ret_val;
-    uint16_t mii_ctrl_reg;
-    uint16_t mii_status_reg;
-    uint16_t phy_data;
-    uint16_t i;
+    u32 ctrl;
+    s32 ret_val;
+    u16 mii_ctrl_reg;
+    u16 mii_status_reg;
+    u16 phy_data;
+    u16 i;
 
     DEBUGFUNC("e1000_phy_force_speed_duplex");
 
@@ -2537,7 +2538,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
 void
 e1000_config_collision_dist(struct e1000_hw *hw)
 {
-    uint32_t tctl, coll_dist;
+    u32 tctl, coll_dist;
 
     DEBUGFUNC("e1000_config_collision_dist");
 
@@ -2564,12 +2565,12 @@ e1000_config_collision_dist(struct e1000_hw *hw)
 * The contents of the PHY register containing the needed information need to
 * be passed in.
 ******************************************************************************/
-static int32_t
+static s32
 e1000_config_mac_to_phy(struct e1000_hw *hw)
 {
-    uint32_t ctrl;
-    int32_t ret_val;
-    uint16_t phy_data;
+    u32 ctrl;
+    s32 ret_val;
+    u16 phy_data;
 
     DEBUGFUNC("e1000_config_mac_to_phy");
 
@@ -2623,10 +2624,10 @@ e1000_config_mac_to_phy(struct e1000_hw *hw)
  * by the PHY rather than the MAC. Software must also configure these
  * bits when link is forced on a fiber connection.
  *****************************************************************************/
-int32_t
+s32
 e1000_force_mac_fc(struct e1000_hw *hw)
 {
-    uint32_t ctrl;
+    u32 ctrl;
 
     DEBUGFUNC("e1000_force_mac_fc");
 
@@ -2690,15 +2691,15 @@ e1000_force_mac_fc(struct e1000_hw *hw)
  * based on the flow control negotiated by the PHY. In TBI mode, the TFCE
  * and RFCE bits will be automaticaly set to the negotiated flow control mode.
  *****************************************************************************/
-static int32_t
+static s32
 e1000_config_fc_after_link_up(struct e1000_hw *hw)
 {
-    int32_t ret_val;
-    uint16_t mii_status_reg;
-    uint16_t mii_nway_adv_reg;
-    uint16_t mii_nway_lp_ability_reg;
-    uint16_t speed;
-    uint16_t duplex;
+    s32 ret_val;
+    u16 mii_status_reg;
+    u16 mii_nway_adv_reg;
+    u16 mii_nway_lp_ability_reg;
+    u16 speed;
+    u16 duplex;
 
     DEBUGFUNC("e1000_config_fc_after_link_up");
 
@@ -2895,17 +2896,17 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
  *
  * Called by any function that needs to check the link status of the adapter.
  *****************************************************************************/
-int32_t
+s32
 e1000_check_for_link(struct e1000_hw *hw)
 {
-    uint32_t rxcw = 0;
-    uint32_t ctrl;
-    uint32_t status;
-    uint32_t rctl;
-    uint32_t icr;
-    uint32_t signal = 0;
-    int32_t ret_val;
-    uint16_t phy_data;
+    u32 rxcw = 0;
+    u32 ctrl;
+    u32 status;
+    u32 rctl;
+    u32 icr;
+    u32 signal = 0;
+    s32 ret_val;
+    u16 phy_data;
 
     DEBUGFUNC("e1000_check_for_link");
 
@@ -2923,7 +2924,7 @@ e1000_check_for_link(struct e1000_hw *hw)
         if (hw->media_type == e1000_media_type_fiber) {
             signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
             if (status & E1000_STATUS_LU)
-                hw->get_link_status = FALSE;
+                hw->get_link_status = false;
         }
     }
 
@@ -2947,7 +2948,7 @@ e1000_check_for_link(struct e1000_hw *hw)
             return ret_val;
 
         if (phy_data & MII_SR_LINK_STATUS) {
-            hw->get_link_status = FALSE;
+            hw->get_link_status = false;
             /* Check if there was DownShift, must be checked immediately after
              * link-up */
             e1000_check_downshift(hw);
@@ -2973,7 +2974,7 @@ e1000_check_for_link(struct e1000_hw *hw)
 
         } else {
             /* No link detected */
-            e1000_config_dsp_after_link_change(hw, FALSE);
+            e1000_config_dsp_after_link_change(hw, false);
             return 0;
         }
 
@@ -2983,7 +2984,7 @@ e1000_check_for_link(struct e1000_hw *hw)
         if (!hw->autoneg) return -E1000_ERR_CONFIG;
 
         /* optimize the dsp settings for the igp phy */
-        e1000_config_dsp_after_link_change(hw, TRUE);
+        e1000_config_dsp_after_link_change(hw, true);
 
         /* We have a M88E1000 PHY and Auto-Neg is enabled.  If we
          * have Si on board that is 82544 or newer, Auto
@@ -3021,7 +3022,7 @@ e1000_check_for_link(struct e1000_hw *hw)
          * at gigabit speed, we turn on TBI compatibility.
          */
         if (hw->tbi_compatibility_en) {
-            uint16_t speed, duplex;
+            u16 speed, duplex;
             ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
             if (ret_val) {
                 DEBUGOUT("Error getting link speed and duplex\n");
@@ -3036,7 +3037,7 @@ e1000_check_for_link(struct e1000_hw *hw)
                     rctl = E1000_READ_REG(hw, RCTL);
                     rctl &= ~E1000_RCTL_SBP;
                     E1000_WRITE_REG(hw, RCTL, rctl);
-                    hw->tbi_compatibility_on = FALSE;
+                    hw->tbi_compatibility_on = false;
                 }
             } else {
                 /* If TBI compatibility is was previously off, turn it on. For
@@ -3045,7 +3046,7 @@ e1000_check_for_link(struct e1000_hw *hw)
                  * will look like CRC errors to to the hardware.
                  */
                 if (!hw->tbi_compatibility_on) {
-                    hw->tbi_compatibility_on = TRUE;
+                    hw->tbi_compatibility_on = true;
                     rctl = E1000_READ_REG(hw, RCTL);
                     rctl |= E1000_RCTL_SBP;
                     E1000_WRITE_REG(hw, RCTL, rctl);
@@ -3098,7 +3099,7 @@ e1000_check_for_link(struct e1000_hw *hw)
         E1000_WRITE_REG(hw, TXCW, hw->txcw);
         E1000_WRITE_REG(hw, CTRL, (ctrl & ~E1000_CTRL_SLU));
 
-        hw->serdes_link_down = FALSE;
+        hw->serdes_link_down = false;
     }
     /* If we force link for non-auto-negotiation switch, check link status
      * based on MAC synchronization for internal serdes media type.
@@ -3109,11 +3110,11 @@ e1000_check_for_link(struct e1000_hw *hw)
         udelay(10);
         if (E1000_RXCW_SYNCH & E1000_READ_REG(hw, RXCW)) {
             if (!(rxcw & E1000_RXCW_IV)) {
-                hw->serdes_link_down = FALSE;
+                hw->serdes_link_down = false;
                 DEBUGOUT("SERDES: Link is up.\n");
             }
         } else {
-            hw->serdes_link_down = TRUE;
+            hw->serdes_link_down = true;
             DEBUGOUT("SERDES: Link is down.\n");
         }
     }
@@ -3131,14 +3132,14 @@ e1000_check_for_link(struct e1000_hw *hw)
  * speed - Speed of the connection
  * duplex - Duplex setting of the connection
  *****************************************************************************/
-int32_t
+s32
 e1000_get_speed_and_duplex(struct e1000_hw *hw,
-                           uint16_t *speed,
-                           uint16_t *duplex)
+                           u16 *speed,
+                           u16 *duplex)
 {
-    uint32_t status;
-    int32_t ret_val;
-    uint16_t phy_data;
+    u32 status;
+    s32 ret_val;
+    u16 phy_data;
 
     DEBUGFUNC("e1000_get_speed_and_duplex");
 
@@ -3213,12 +3214,12 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw,
 *
 * hw - Struct containing variables accessed by shared code
 ******************************************************************************/
-static int32_t
+static s32
 e1000_wait_autoneg(struct e1000_hw *hw)
 {
-    int32_t ret_val;
-    uint16_t i;
-    uint16_t phy_data;
+    s32 ret_val;
+    u16 i;
+    u16 phy_data;
 
     DEBUGFUNC("e1000_wait_autoneg");
     DEBUGOUT("Waiting for Auto-Neg to complete.\n");
@@ -3250,7 +3251,7 @@ e1000_wait_autoneg(struct e1000_hw *hw)
 ******************************************************************************/
 static void
 e1000_raise_mdi_clk(struct e1000_hw *hw,
-                    uint32_t *ctrl)
+                    u32 *ctrl)
 {
     /* Raise the clock input to the Management Data Clock (by setting the MDC
      * bit), and then delay 10 microseconds.
@@ -3268,7 +3269,7 @@ e1000_raise_mdi_clk(struct e1000_hw *hw,
 ******************************************************************************/
 static void
 e1000_lower_mdi_clk(struct e1000_hw *hw,
-                    uint32_t *ctrl)
+                    u32 *ctrl)
 {
     /* Lower the clock input to the Management Data Clock (by clearing the MDC
      * bit), and then delay 10 microseconds.
@@ -3289,11 +3290,11 @@ e1000_lower_mdi_clk(struct e1000_hw *hw,
 ******************************************************************************/
 static void
 e1000_shift_out_mdi_bits(struct e1000_hw *hw,
-                         uint32_t data,
-                         uint16_t count)
+                         u32 data,
+                         u16 count)
 {
-    uint32_t ctrl;
-    uint32_t mask;
+    u32 ctrl;
+    u32 mask;
 
     /* We need to shift "count" number of bits out to the PHY. So, the value
      * in the "data" parameter will be shifted out to the PHY one bit at a
@@ -3337,12 +3338,12 @@ e1000_shift_out_mdi_bits(struct e1000_hw *hw,
 *
 * Bits are shifted in (MSB to LSB order).
 ******************************************************************************/
-static uint16_t
+static u16
 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
 {
-    uint32_t ctrl;
-    uint16_t data = 0;
-    uint8_t i;
+    u32 ctrl;
+    u16 data = 0;
+    u8 i;
 
     /* In order to read a register from the PHY, we need to shift in a total
      * of 18 bits from the PHY. The first two bit (turnaround) times are used
@@ -3383,13 +3384,13 @@ e1000_shift_in_mdi_bits(struct e1000_hw *hw)
     return data;
 }
 
-static int32_t
-e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask)
+static s32
+e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask)
 {
-    uint32_t swfw_sync = 0;
-    uint32_t swmask = mask;
-    uint32_t fwmask = mask << 16;
-    int32_t timeout = 200;
+    u32 swfw_sync = 0;
+    u32 swmask = mask;
+    u32 fwmask = mask << 16;
+    s32 timeout = 200;
 
     DEBUGFUNC("e1000_swfw_sync_acquire");
 
@@ -3428,10 +3429,10 @@ e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask)
 }
 
 static void
-e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask)
+e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask)
 {
-    uint32_t swfw_sync;
-    uint32_t swmask = mask;
+    u32 swfw_sync;
+    u32 swmask = mask;
 
     DEBUGFUNC("e1000_swfw_sync_release");
 
@@ -3463,13 +3464,13 @@ e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask)
 * hw - Struct containing variables accessed by shared code
 * reg_addr - address of the PHY register to read
 ******************************************************************************/
-int32_t
+s32
 e1000_read_phy_reg(struct e1000_hw *hw,
-                   uint32_t reg_addr,
-                   uint16_t *phy_data)
+                   u32 reg_addr,
+                   u16 *phy_data)
 {
-    uint32_t ret_val;
-    uint16_t swfw;
+    u32 ret_val;
+    u16 swfw;
 
     DEBUGFUNC("e1000_read_phy_reg");
 
@@ -3487,7 +3488,7 @@ e1000_read_phy_reg(struct e1000_hw *hw,
         hw->phy_type == e1000_phy_igp_2) &&
        (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
         ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
-                                         (uint16_t)reg_addr);
+                                         (u16)reg_addr);
         if (ret_val) {
             e1000_swfw_sync_release(hw, swfw);
             return ret_val;
@@ -3498,14 +3499,14 @@ e1000_read_phy_reg(struct e1000_hw *hw,
             /* Select Configuration Page */
             if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
                 ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT,
-                          (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT));
+                          (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
             } else {
                 /* Use Alternative Page Select register to access
                  * registers 30 and 31
                  */
                 ret_val = e1000_write_phy_reg_ex(hw,
                                                  GG82563_PHY_PAGE_SELECT_ALT,
-                          (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT));
+                          (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
             }
 
             if (ret_val) {
@@ -3522,13 +3523,13 @@ e1000_read_phy_reg(struct e1000_hw *hw,
     return ret_val;
 }
 
-static int32_t
-e1000_read_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr,
-                      uint16_t *phy_data)
+static s32
+e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+                      u16 *phy_data)
 {
-    uint32_t i;
-    uint32_t mdic = 0;
-    const uint32_t phy_addr = 1;
+    u32 i;
+    u32 mdic = 0;
+    const u32 phy_addr = 1;
 
     DEBUGFUNC("e1000_read_phy_reg_ex");
 
@@ -3562,7 +3563,7 @@ e1000_read_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr,
             DEBUGOUT("MDI Error\n");
             return -E1000_ERR_PHY;
         }
-        *phy_data = (uint16_t) mdic;
+        *phy_data = (u16) mdic;
     } else {
         /* We must first send a preamble through the MDIO pin to signal the
          * beginning of an MII instruction.  This is done by sending 32
@@ -3602,12 +3603,12 @@ e1000_read_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr,
 * reg_addr - address of the PHY register to write
 * data - data to write to the PHY
 ******************************************************************************/
-int32_t
-e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr,
-                    uint16_t phy_data)
+s32
+e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr,
+                    u16 phy_data)
 {
-    uint32_t ret_val;
-    uint16_t swfw;
+    u32 ret_val;
+    u16 swfw;
 
     DEBUGFUNC("e1000_write_phy_reg");
 
@@ -3625,7 +3626,7 @@ e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr,
         hw->phy_type == e1000_phy_igp_2) &&
        (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
         ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
-                                         (uint16_t)reg_addr);
+                                         (u16)reg_addr);
         if (ret_val) {
             e1000_swfw_sync_release(hw, swfw);
             return ret_val;
@@ -3636,14 +3637,14 @@ e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr,
             /* Select Configuration Page */
             if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
                 ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT,
-                          (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT));
+                          (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
             } else {
                 /* Use Alternative Page Select register to access
                  * registers 30 and 31
                  */
                 ret_val = e1000_write_phy_reg_ex(hw,
                                                  GG82563_PHY_PAGE_SELECT_ALT,
-                          (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT));
+                          (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
             }
 
             if (ret_val) {
@@ -3660,13 +3661,13 @@ e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr,
     return ret_val;
 }
 
-static int32_t
-e1000_write_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr,
-                       uint16_t phy_data)
+static s32
+e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+                       u16 phy_data)
 {
-    uint32_t i;
-    uint32_t mdic = 0;
-    const uint32_t phy_addr = 1;
+    u32 i;
+    u32 mdic = 0;
+    const u32 phy_addr = 1;
 
     DEBUGFUNC("e1000_write_phy_reg_ex");
 
@@ -3680,7 +3681,7 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr,
          * for the PHY register in the MDI Control register.  The MAC will take
          * care of interfacing with the PHY to send the desired data.
          */
-        mdic = (((uint32_t) phy_data) |
+        mdic = (((u32) phy_data) |
                 (reg_addr << E1000_MDIC_REG_SHIFT) |
                 (phy_addr << E1000_MDIC_PHY_SHIFT) |
                 (E1000_MDIC_OP_WRITE));
@@ -3714,7 +3715,7 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr,
         mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
                 (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
         mdic <<= 16;
-        mdic |= (uint32_t) phy_data;
+        mdic |= (u32) phy_data;
 
         e1000_shift_out_mdi_bits(hw, mdic, 32);
     }
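
On controllers without a hardware MDIC register, the same write goes out through the bit-bang path just above: the 32-bit management frame is assembled in software and clocked out MSB first. A standalone sketch of that packing; the MII_* constants are local stand-ins assumed to match the driver's PHY_SOF, PHY_OP_WRITE and PHY_TURNAROUND values (01b, 01b and 10b, as in IEEE 802.3 clause 22):

#include <stdint.h>
#include <stdio.h>

/* Assumed clause-22 field values: start-of-frame, write opcode, turnaround. */
#define MII_SOF        0x1u   /* 01b */
#define MII_OP_WRITE   0x1u   /* 01b */
#define MII_TURNAROUND 0x2u   /* 10b */

/* Pack a 32-bit MII write frame the way the bit-bang path does it: the
 * control bits end up in the upper 16 bits, the data word in the lower 16. */
static uint32_t mii_write_frame(uint32_t phy_addr, uint32_t reg_addr, uint16_t data)
{
    uint32_t mdic = MII_TURNAROUND | (reg_addr << 2) | (phy_addr << 7) |
                    (MII_OP_WRITE << 12) | (MII_SOF << 14);

    mdic <<= 16;              /* control half first ... */
    mdic |= data;             /* ... then 16 data bits, shifted out last */
    return mdic;
}

int main(void)
{
    /* Example: write 0x1234 to register 0 of the PHY at address 1. */
    printf("frame = 0x%08x\n", (unsigned)mii_write_frame(1, 0, 0x1234));
    return 0;
}
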
@@ -3722,13 +3723,13 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr,
     return E1000_SUCCESS;
 }
 
-static int32_t
+static s32
 e1000_read_kmrn_reg(struct e1000_hw *hw,
-                    uint32_t reg_addr,
-                    uint16_t *data)
+                    u32 reg_addr,
+                    u16 *data)
 {
-    uint32_t reg_val;
-    uint16_t swfw;
+    u32 reg_val;
+    u16 swfw;
     DEBUGFUNC("e1000_read_kmrn_reg");
 
     if ((hw->mac_type == e1000_80003es2lan) &&
@@ -3749,19 +3750,19 @@ e1000_read_kmrn_reg(struct e1000_hw *hw,
 
     /* Read the data returned */
     reg_val = E1000_READ_REG(hw, KUMCTRLSTA);
-    *data = (uint16_t)reg_val;
+    *data = (u16)reg_val;
 
     e1000_swfw_sync_release(hw, swfw);
     return E1000_SUCCESS;
 }
 
-static int32_t
+static s32
 e1000_write_kmrn_reg(struct e1000_hw *hw,
-                     uint32_t reg_addr,
-                     uint16_t data)
+                     u32 reg_addr,
+                     u16 data)
 {
-    uint32_t reg_val;
-    uint16_t swfw;
+    u32 reg_val;
+    u16 swfw;
     DEBUGFUNC("e1000_write_kmrn_reg");
 
     if ((hw->mac_type == e1000_80003es2lan) &&
@@ -3787,13 +3788,13 @@ e1000_write_kmrn_reg(struct e1000_hw *hw,
 *
 * hw - Struct containing variables accessed by shared code
 ******************************************************************************/
-int32_t
+s32
 e1000_phy_hw_reset(struct e1000_hw *hw)
 {
-    uint32_t ctrl, ctrl_ext;
-    uint32_t led_ctrl;
-    int32_t ret_val;
-    uint16_t swfw;
+    u32 ctrl, ctrl_ext;
+    u32 led_ctrl;
+    s32 ret_val;
+    u16 swfw;
 
     DEBUGFUNC("e1000_phy_hw_reset");
 
@@ -3881,11 +3882,11 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
 *
 * Sets bit 15 of the MII Control register
 ******************************************************************************/
-int32_t
+s32
 e1000_phy_reset(struct e1000_hw *hw)
 {
-    int32_t ret_val;
-    uint16_t phy_data;
+    s32 ret_val;
+    u16 phy_data;
 
     DEBUGFUNC("e1000_phy_reset");
 
@@ -3936,9 +3937,9 @@ e1000_phy_reset(struct e1000_hw *hw)
 void
 e1000_phy_powerdown_workaround(struct e1000_hw *hw)
 {
-    int32_t reg;
-    uint16_t phy_data;
-    int32_t retry = 0;
+    s32 reg;
+    u16 phy_data;
+    s32 retry = 0;
 
     DEBUGFUNC("e1000_phy_powerdown_workaround");
 
@@ -3986,13 +3987,13 @@ e1000_phy_powerdown_workaround(struct e1000_hw *hw)
 *
 * hw - struct containing variables accessed by shared code
 ******************************************************************************/
-static int32_t
+static s32
 e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw)
 {
-    int32_t ret_val;
-    int32_t reg;
-    int32_t cnt;
-    uint16_t phy_data;
+    s32 ret_val;
+    s32 reg;
+    s32 cnt;
+    u16 phy_data;
 
     if (hw->kmrn_lock_loss_workaround_disabled)
         return E1000_SUCCESS;
@@ -4039,12 +4040,12 @@ e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw)
 *
 * hw - Struct containing variables accessed by shared code
 ******************************************************************************/
-static int32_t
+static s32
 e1000_detect_gig_phy(struct e1000_hw *hw)
 {
-    int32_t phy_init_status, ret_val;
-    uint16_t phy_id_high, phy_id_low;
-    boolean_t match = FALSE;
+    s32 phy_init_status, ret_val;
+    u16 phy_id_high, phy_id_low;
+    bool match = false;
 
     DEBUGFUNC("e1000_detect_gig_phy");
 
@@ -4075,46 +4076,46 @@ e1000_detect_gig_phy(struct e1000_hw *hw)
     if (ret_val)
         return ret_val;
 
-    hw->phy_id = (uint32_t) (phy_id_high << 16);
+    hw->phy_id = (u32) (phy_id_high << 16);
     udelay(20);
     ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low);
     if (ret_val)
         return ret_val;
 
-    hw->phy_id |= (uint32_t) (phy_id_low & PHY_REVISION_MASK);
-    hw->phy_revision = (uint32_t) phy_id_low & ~PHY_REVISION_MASK;
+    hw->phy_id |= (u32) (phy_id_low & PHY_REVISION_MASK);
+    hw->phy_revision = (u32) phy_id_low & ~PHY_REVISION_MASK;
 
     switch (hw->mac_type) {
     case e1000_82543:
-        if (hw->phy_id == M88E1000_E_PHY_ID) match = TRUE;
+        if (hw->phy_id == M88E1000_E_PHY_ID) match = true;
         break;
     case e1000_82544:
-        if (hw->phy_id == M88E1000_I_PHY_ID) match = TRUE;
+        if (hw->phy_id == M88E1000_I_PHY_ID) match = true;
         break;
     case e1000_82540:
     case e1000_82545:
     case e1000_82545_rev_3:
     case e1000_82546:
     case e1000_82546_rev_3:
-        if (hw->phy_id == M88E1011_I_PHY_ID) match = TRUE;
+        if (hw->phy_id == M88E1011_I_PHY_ID) match = true;
         break;
     case e1000_82541:
     case e1000_82541_rev_2:
     case e1000_82547:
     case e1000_82547_rev_2:
-        if (hw->phy_id == IGP01E1000_I_PHY_ID) match = TRUE;
+        if (hw->phy_id == IGP01E1000_I_PHY_ID) match = true;
         break;
     case e1000_82573:
-        if (hw->phy_id == M88E1111_I_PHY_ID) match = TRUE;
+        if (hw->phy_id == M88E1111_I_PHY_ID) match = true;
         break;
     case e1000_80003es2lan:
-        if (hw->phy_id == GG82563_E_PHY_ID) match = TRUE;
+        if (hw->phy_id == GG82563_E_PHY_ID) match = true;
         break;
     case e1000_ich8lan:
-        if (hw->phy_id == IGP03E1000_E_PHY_ID) match = TRUE;
-        if (hw->phy_id == IFE_E_PHY_ID) match = TRUE;
-        if (hw->phy_id == IFE_PLUS_E_PHY_ID) match = TRUE;
-        if (hw->phy_id == IFE_C_E_PHY_ID) match = TRUE;
+        if (hw->phy_id == IGP03E1000_E_PHY_ID) match = true;
+        if (hw->phy_id == IFE_E_PHY_ID) match = true;
+        if (hw->phy_id == IFE_PLUS_E_PHY_ID) match = true;
+        if (hw->phy_id == IFE_C_E_PHY_ID) match = true;
         break;
     default:
         DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type);
@@ -4135,10 +4136,10 @@ e1000_detect_gig_phy(struct e1000_hw *hw)
 *
 * hw - Struct containing variables accessed by shared code
 ******************************************************************************/
-static int32_t
+static s32
 e1000_phy_reset_dsp(struct e1000_hw *hw)
 {
-    int32_t ret_val;
+    s32 ret_val;
     DEBUGFUNC("e1000_phy_reset_dsp");
 
     do {
@@ -4162,12 +4163,12 @@ e1000_phy_reset_dsp(struct e1000_hw *hw)
 * hw - Struct containing variables accessed by shared code
 * phy_info - PHY information structure
 ******************************************************************************/
-static int32_t
+static s32
 e1000_phy_igp_get_info(struct e1000_hw *hw,
                        struct e1000_phy_info *phy_info)
 {
-    int32_t ret_val;
-    uint16_t phy_data, min_length, max_length, average;
+    s32 ret_val;
+    u16 phy_data, min_length, max_length, average;
     e1000_rev_polarity polarity;
 
     DEBUGFUNC("e1000_phy_igp_get_info");
@@ -4239,12 +4240,12 @@ e1000_phy_igp_get_info(struct e1000_hw *hw,
 * hw - Struct containing variables accessed by shared code
 * phy_info - PHY information structure
 ******************************************************************************/
-static int32_t
+static s32
 e1000_phy_ife_get_info(struct e1000_hw *hw,
                        struct e1000_phy_info *phy_info)
 {
-    int32_t ret_val;
-    uint16_t phy_data;
+    s32 ret_val;
+    u16 phy_data;
     e1000_rev_polarity polarity;
 
     DEBUGFUNC("e1000_phy_ife_get_info");
@@ -4289,12 +4290,12 @@ e1000_phy_ife_get_info(struct e1000_hw *hw,
 * hw - Struct containing variables accessed by shared code
 * phy_info - PHY information structure
 ******************************************************************************/
-static int32_t
+static s32
 e1000_phy_m88_get_info(struct e1000_hw *hw,
                        struct e1000_phy_info *phy_info)
 {
-    int32_t ret_val;
-    uint16_t phy_data;
+    s32 ret_val;
+    u16 phy_data;
     e1000_rev_polarity polarity;
 
     DEBUGFUNC("e1000_phy_m88_get_info");
@@ -4368,12 +4369,12 @@ e1000_phy_m88_get_info(struct e1000_hw *hw,
 * hw - Struct containing variables accessed by shared code
 * phy_info - PHY information structure
 ******************************************************************************/
-int32_t
+s32
 e1000_phy_get_info(struct e1000_hw *hw,
                    struct e1000_phy_info *phy_info)
 {
-    int32_t ret_val;
-    uint16_t phy_data;
+    s32 ret_val;
+    u16 phy_data;
 
     DEBUGFUNC("e1000_phy_get_info");
 
@@ -4414,7 +4415,7 @@ e1000_phy_get_info(struct e1000_hw *hw,
         return e1000_phy_m88_get_info(hw, phy_info);
 }
 
-int32_t
+s32
 e1000_validate_mdi_setting(struct e1000_hw *hw)
 {
     DEBUGFUNC("e1000_validate_mdi_settings");
@@ -4435,13 +4436,13 @@ e1000_validate_mdi_setting(struct e1000_hw *hw)
  *
  * hw - Struct containing variables accessed by shared code
  *****************************************************************************/
-int32_t
+s32
 e1000_init_eeprom_params(struct e1000_hw *hw)
 {
     struct e1000_eeprom_info *eeprom = &hw->eeprom;
-    uint32_t eecd = E1000_READ_REG(hw, EECD);
-    int32_t ret_val = E1000_SUCCESS;
-    uint16_t eeprom_size;
+    u32 eecd = E1000_READ_REG(hw, EECD);
+    s32 ret_val = E1000_SUCCESS;
+    u16 eeprom_size;
 
     DEBUGFUNC("e1000_init_eeprom_params");
 
@@ -4455,8 +4456,8 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
         eeprom->opcode_bits = 3;
         eeprom->address_bits = 6;
         eeprom->delay_usec = 50;
-        eeprom->use_eerd = FALSE;
-        eeprom->use_eewr = FALSE;
+        eeprom->use_eerd = false;
+        eeprom->use_eewr = false;
         break;
     case e1000_82540:
     case e1000_82545:
@@ -4473,8 +4474,8 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
             eeprom->word_size = 64;
             eeprom->address_bits = 6;
         }
-        eeprom->use_eerd = FALSE;
-        eeprom->use_eewr = FALSE;
+        eeprom->use_eerd = false;
+        eeprom->use_eewr = false;
         break;
     case e1000_82541:
     case e1000_82541_rev_2:
@@ -4503,8 +4504,8 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
                 eeprom->address_bits = 6;
             }
         }
-        eeprom->use_eerd = FALSE;
-        eeprom->use_eewr = FALSE;
+        eeprom->use_eerd = false;
+        eeprom->use_eewr = false;
         break;
     case e1000_82571:
     case e1000_82572:
@@ -4518,8 +4519,8 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
             eeprom->page_size = 8;
             eeprom->address_bits = 8;
         }
-        eeprom->use_eerd = FALSE;
-        eeprom->use_eewr = FALSE;
+        eeprom->use_eerd = false;
+        eeprom->use_eewr = false;
         break;
     case e1000_82573:
         eeprom->type = e1000_eeprom_spi;
@@ -4532,9 +4533,9 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
             eeprom->page_size = 8;
             eeprom->address_bits = 8;
         }
-        eeprom->use_eerd = TRUE;
-        eeprom->use_eewr = TRUE;
-        if (e1000_is_onboard_nvm_eeprom(hw) == FALSE) {
+        eeprom->use_eerd = true;
+        eeprom->use_eewr = true;
+        if (!e1000_is_onboard_nvm_eeprom(hw)) {
             eeprom->type = e1000_eeprom_flash;
             eeprom->word_size = 2048;
 
@@ -4555,24 +4556,24 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
             eeprom->page_size = 8;
             eeprom->address_bits = 8;
         }
-        eeprom->use_eerd = TRUE;
-        eeprom->use_eewr = FALSE;
+        eeprom->use_eerd = true;
+        eeprom->use_eewr = false;
         break;
     case e1000_ich8lan:
         {
-        int32_t  i = 0;
-        uint32_t flash_size = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_GFPREG);
+        s32  i = 0;
+        u32 flash_size = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_GFPREG);
 
         eeprom->type = e1000_eeprom_ich8;
-        eeprom->use_eerd = FALSE;
-        eeprom->use_eewr = FALSE;
+        eeprom->use_eerd = false;
+        eeprom->use_eewr = false;
         eeprom->word_size = E1000_SHADOW_RAM_WORDS;
 
         /* Zero the shadow RAM structure. But don't load it from NVM
          * so as to save time for driver init */
         if (hw->eeprom_shadow_ram != NULL) {
             for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
-                hw->eeprom_shadow_ram[i].modified = FALSE;
+                hw->eeprom_shadow_ram[i].modified = false;
                 hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
             }
         }
@@ -4585,7 +4586,7 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
 
         hw->flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
 
-        hw->flash_bank_size /= 2 * sizeof(uint16_t);
+        hw->flash_bank_size /= 2 * sizeof(u16);
 
         break;
         }
@@ -4610,7 +4611,7 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
             if (eeprom_size)
                 eeprom_size++;
         } else {
-            eeprom_size = (uint16_t)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+            eeprom_size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
                           E1000_EECD_SIZE_EX_SHIFT);
         }
 
@@ -4627,7 +4628,7 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
  *****************************************************************************/
 static void
 e1000_raise_ee_clk(struct e1000_hw *hw,
-                   uint32_t *eecd)
+                   u32 *eecd)
 {
     /* Raise the clock input to the EEPROM (by setting the SK bit), and then
      * wait <delay> microseconds.
@@ -4646,7 +4647,7 @@ e1000_raise_ee_clk(struct e1000_hw *hw,
  *****************************************************************************/
 static void
 e1000_lower_ee_clk(struct e1000_hw *hw,
-                   uint32_t *eecd)
+                   u32 *eecd)
 {
     /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
      * wait 50 microseconds.
@@ -4666,12 +4667,12 @@ e1000_lower_ee_clk(struct e1000_hw *hw,
  *****************************************************************************/
 static void
 e1000_shift_out_ee_bits(struct e1000_hw *hw,
-                        uint16_t data,
-                        uint16_t count)
+                        u16 data,
+                        u16 count)
 {
     struct e1000_eeprom_info *eeprom = &hw->eeprom;
-    uint32_t eecd;
-    uint32_t mask;
+    u32 eecd;
+    u32 mask;
 
     /* We need to shift "count" bits out to the EEPROM. So, value in the
      * "data" parameter will be shifted out to the EEPROM one bit at a time.
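
The loop bodies of the shift-out helpers are not part of these hunks, but the surrounding comments describe the same MSB-first bit-bang pattern for both the MDIO and EEPROM paths: walk a mask from bit count-1 down to bit 0 and drive the data pin once per clock cycle. A runnable model with the pin replaced by a character buffer:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the MSB-first shift-out loop: one mask bit per clock cycle.
 * Here the "pin" is just a character appended to a buffer. */
static void shift_out_bits(uint32_t data, uint16_t count, char *out)
{
    uint32_t mask = 1u << (count - 1);
    size_t pos = 0;

    while (mask) {
        out[pos++] = (data & mask) ? '1' : '0'; /* drive the data line high/low */
        /* real code raises, then lowers, the clock line here (one bit time)   */
        mask >>= 1;
    }
    out[pos] = '\0';
}

int main(void)
{
    char bits[33];

    shift_out_bits(0xA5, 8, bits);   /* expect "10100101" */
    printf("%s\n", bits);
    return 0;
}
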
@@ -4717,13 +4718,13 @@ e1000_shift_out_ee_bits(struct e1000_hw *hw,
  *
  * hw - Struct containing variables accessed by shared code
  *****************************************************************************/
-static uint16_t
+static u16
 e1000_shift_in_ee_bits(struct e1000_hw *hw,
-                       uint16_t count)
+                       u16 count)
 {
-    uint32_t eecd;
-    uint32_t i;
-    uint16_t data;
+    u32 eecd;
+    u32 i;
+    u16 data;
 
     /* In order to read a register from the EEPROM, we need to shift 'count'
      * bits in from the EEPROM. Bits are "shifted in" by raising the clock
@@ -4761,11 +4762,11 @@ e1000_shift_in_ee_bits(struct e1000_hw *hw,
  * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This
  * function should be called before issuing a command to the EEPROM.
  *****************************************************************************/
-static int32_t
+static s32
 e1000_acquire_eeprom(struct e1000_hw *hw)
 {
     struct e1000_eeprom_info *eeprom = &hw->eeprom;
-    uint32_t eecd, i=0;
+    u32 eecd, i=0;
 
     DEBUGFUNC("e1000_acquire_eeprom");
 
@@ -4824,7 +4825,7 @@ static void
 e1000_standby_eeprom(struct e1000_hw *hw)
 {
     struct e1000_eeprom_info *eeprom = &hw->eeprom;
-    uint32_t eecd;
+    u32 eecd;
 
     eecd = E1000_READ_REG(hw, EECD);
 
@@ -4872,7 +4873,7 @@ e1000_standby_eeprom(struct e1000_hw *hw)
 static void
 e1000_release_eeprom(struct e1000_hw *hw)
 {
-    uint32_t eecd;
+    u32 eecd;
 
     DEBUGFUNC("e1000_release_eeprom");
 
@@ -4920,11 +4921,11 @@ e1000_release_eeprom(struct e1000_hw *hw)
  *
  * hw - Struct containing variables accessed by shared code
  *****************************************************************************/
-static int32_t
+static s32
 e1000_spi_eeprom_ready(struct e1000_hw *hw)
 {
-    uint16_t retry_count = 0;
-    uint8_t spi_stat_reg;
+    u16 retry_count = 0;
+    u8 spi_stat_reg;
 
     DEBUGFUNC("e1000_spi_eeprom_ready");
 
@@ -4937,7 +4938,7 @@ e1000_spi_eeprom_ready(struct e1000_hw *hw)
     do {
         e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI,
                                 hw->eeprom.opcode_bits);
-        spi_stat_reg = (uint8_t)e1000_shift_in_ee_bits(hw, 8);
+        spi_stat_reg = (u8)e1000_shift_in_ee_bits(hw, 8);
         if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI))
             break;
 
@@ -4966,14 +4967,14 @@ e1000_spi_eeprom_ready(struct e1000_hw *hw)
  * data - word read from the EEPROM
  * words - number of words to read
  *****************************************************************************/
-int32_t
+s32
 e1000_read_eeprom(struct e1000_hw *hw,
-                  uint16_t offset,
-                  uint16_t words,
-                  uint16_t *data)
+                  u16 offset,
+                  u16 words,
+                  u16 *data)
 {
     struct e1000_eeprom_info *eeprom = &hw->eeprom;
-    uint32_t i = 0;
+    u32 i = 0;
 
     DEBUGFUNC("e1000_read_eeprom");
 
@@ -4994,15 +4995,14 @@ e1000_read_eeprom(struct e1000_hw *hw,
      * directly. In this case, we need to acquire the EEPROM so that
      * FW or other port software does not interrupt.
      */
-    if (e1000_is_onboard_nvm_eeprom(hw) == TRUE &&
-        hw->eeprom.use_eerd == FALSE) {
+    if (e1000_is_onboard_nvm_eeprom(hw) && !hw->eeprom.use_eerd) {
         /* Prepare the EEPROM for bit-bang reading */
         if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
             return -E1000_ERR_EEPROM;
     }
 
     /* EERD register EEPROM access requires no eeprom acquire/release */
-    if (eeprom->use_eerd == TRUE)
+    if (eeprom->use_eerd)
         return e1000_read_eeprom_eerd(hw, offset, words, data);
 
     /* ICH EEPROM access is done via the ICH flash controller */
@@ -5012,8 +5012,8 @@ e1000_read_eeprom(struct e1000_hw *hw,
     /* Set up the SPI or Microwire EEPROM for bit-bang reading.  We have
      * acquired the EEPROM at this point, so any returns should release it */
     if (eeprom->type == e1000_eeprom_spi) {
-        uint16_t word_in;
-        uint8_t read_opcode = EEPROM_READ_OPCODE_SPI;
+        u16 word_in;
+        u8 read_opcode = EEPROM_READ_OPCODE_SPI;
 
         if (e1000_spi_eeprom_ready(hw)) {
             e1000_release_eeprom(hw);
@@ -5028,7 +5028,7 @@ e1000_read_eeprom(struct e1000_hw *hw,
 
         /* Send the READ command (opcode + addr)  */
         e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits);
-        e1000_shift_out_ee_bits(hw, (uint16_t)(offset*2), eeprom->address_bits);
+        e1000_shift_out_ee_bits(hw, (u16)(offset*2), eeprom->address_bits);
 
         /* Read the data.  The address of the eeprom internally increments with
          * each byte (spi) being read, saving on the overhead of eeprom setup
@@ -5044,7 +5044,7 @@ e1000_read_eeprom(struct e1000_hw *hw,
             /* Send the READ command (opcode + addr)  */
             e1000_shift_out_ee_bits(hw, EEPROM_READ_OPCODE_MICROWIRE,
                                     eeprom->opcode_bits);
-            e1000_shift_out_ee_bits(hw, (uint16_t)(offset + i),
+            e1000_shift_out_ee_bits(hw, (u16)(offset + i),
                                     eeprom->address_bits);
 
             /* Read the data.  For microwire, each word requires the overhead
@@ -5068,14 +5068,14 @@ e1000_read_eeprom(struct e1000_hw *hw,
  * data - word read from the EEPROM
  * words - number of words to read
  *****************************************************************************/
-static int32_t
+static s32
 e1000_read_eeprom_eerd(struct e1000_hw *hw,
-                  uint16_t offset,
-                  uint16_t words,
-                  uint16_t *data)
+                  u16 offset,
+                  u16 words,
+                  u16 *data)
 {
-    uint32_t i, eerd = 0;
-    int32_t error = 0;
+    u32 i, eerd = 0;
+    s32 error = 0;
 
     for (i = 0; i < words; i++) {
         eerd = ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) +
@@ -5102,15 +5102,15 @@ e1000_read_eeprom_eerd(struct e1000_hw *hw,
  * data - word read from the EEPROM
  * words - number of words to read
  *****************************************************************************/
-static int32_t
+static s32
 e1000_write_eeprom_eewr(struct e1000_hw *hw,
-                   uint16_t offset,
-                   uint16_t words,
-                   uint16_t *data)
+                   u16 offset,
+                   u16 words,
+                   u16 *data)
 {
-    uint32_t    register_value = 0;
-    uint32_t    i              = 0;
-    int32_t     error          = 0;
+    u32    register_value = 0;
+    u32    i              = 0;
+    s32     error          = 0;
 
     if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM))
         return -E1000_ERR_SWFW_SYNC;
@@ -5143,12 +5143,12 @@ e1000_write_eeprom_eewr(struct e1000_hw *hw,
  *
  * hw - Struct containing variables accessed by shared code
  *****************************************************************************/
-static int32_t
+static s32
 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd)
 {
-    uint32_t attempts = 100000;
-    uint32_t i, reg = 0;
-    int32_t done = E1000_ERR_EEPROM;
+    u32 attempts = 100000;
+    u32 i, reg = 0;
+    s32 done = E1000_ERR_EEPROM;
 
     for (i = 0; i < attempts; i++) {
         if (eerd == E1000_EEPROM_POLL_READ)
@@ -5171,15 +5171,15 @@ e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd)
 *
 * hw - Struct containing variables accessed by shared code
 ****************************************************************************/
-static boolean_t
+static bool
 e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
 {
-    uint32_t eecd = 0;
+    u32 eecd = 0;
 
     DEBUGFUNC("e1000_is_onboard_nvm_eeprom");
 
     if (hw->mac_type == e1000_ich8lan)
-        return FALSE;
+        return false;
 
     if (hw->mac_type == e1000_82573) {
         eecd = E1000_READ_REG(hw, EECD);
@@ -5189,10 +5189,10 @@ e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
 
         /* If both bits are set, device is Flash type */
         if (eecd == 0x03) {
-            return FALSE;
+            return false;
         }
     }
-    return TRUE;
+    return true;
 }
 
 /******************************************************************************
@@ -5204,16 +5204,15 @@ e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
  * If the sum of the 64 16-bit words is 0xBABA, the EEPROM's checksum is
  * valid.
  *****************************************************************************/
-int32_t
+s32
 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
 {
-    uint16_t checksum = 0;
-    uint16_t i, eeprom_data;
+    u16 checksum = 0;
+    u16 i, eeprom_data;
 
     DEBUGFUNC("e1000_validate_eeprom_checksum");
 
-    if ((hw->mac_type == e1000_82573) &&
-        (e1000_is_onboard_nvm_eeprom(hw) == FALSE)) {
+    if ((hw->mac_type == e1000_82573) && !e1000_is_onboard_nvm_eeprom(hw)) {
         /* Check bit 4 of word 10h.  If it is 0, firmware is done updating
          * 10h-12h.  Checksum may need to be fixed. */
         e1000_read_eeprom(hw, 0x10, 1, &eeprom_data);
@@ -5253,7 +5252,7 @@ e1000_validate_eeprom_checksum(struct e1000_hw *hw)
         checksum += eeprom_data;
     }
 
-    if (checksum == (uint16_t) EEPROM_SUM)
+    if (checksum == (u16) EEPROM_SUM)
         return E1000_SUCCESS;
     else {
         DEBUGOUT("EEPROM Checksum Invalid\n");
@@ -5269,12 +5268,12 @@ e1000_validate_eeprom_checksum(struct e1000_hw *hw)
  * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA.
  * Writes the difference to word offset 63 of the EEPROM.
  *****************************************************************************/
-int32_t
+s32
 e1000_update_eeprom_checksum(struct e1000_hw *hw)
 {
-    uint32_t ctrl_ext;
-    uint16_t checksum = 0;
-    uint16_t i, eeprom_data;
+    u32 ctrl_ext;
+    u16 checksum = 0;
+    u16 i, eeprom_data;
 
     DEBUGFUNC("e1000_update_eeprom_checksum");
 
@@ -5285,7 +5284,7 @@ e1000_update_eeprom_checksum(struct e1000_hw *hw)
         }
         checksum += eeprom_data;
     }
-    checksum = (uint16_t) EEPROM_SUM - checksum;
+    checksum = (u16) EEPROM_SUM - checksum;
     if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
         DEBUGOUT("EEPROM Write Error\n");
         return -E1000_ERR_EEPROM;
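
The two comment blocks spell out the checksum rule: the 16-bit sum of all 64 words must equal 0xBABA, and word 63 holds whatever correction makes that true (EEPROM_SUM minus the sum of the first 63 words). A minimal userspace restatement of that arithmetic, independent of the driver's EEPROM access routines:

#include <stdint.h>
#include <stdio.h>

#define EEPROM_SUM 0xBABA   /* target value named in the comments above */

/* Recompute word 63 so the 16-bit sum of all 64 words equals 0xBABA. */
static uint16_t eeprom_fix_checksum(uint16_t eeprom[64])
{
    uint16_t sum = 0;
    int i;

    for (i = 0; i < 63; i++)
        sum += eeprom[i];
    eeprom[63] = (uint16_t)(EEPROM_SUM - sum);
    return eeprom[63];
}

/* Validate the same way e1000_validate_eeprom_checksum() does. */
static int eeprom_checksum_ok(const uint16_t eeprom[64])
{
    uint16_t sum = 0;
    int i;

    for (i = 0; i < 64; i++)
        sum += eeprom[i];
    return sum == (uint16_t)EEPROM_SUM;
}

int main(void)
{
    uint16_t eeprom[64] = { 0x1234, 0x5678, 0x9abc };  /* remaining words zero */

    eeprom_fix_checksum(eeprom);
    printf("checksum word = 0x%04x, valid = %d\n",
           (unsigned)eeprom[63], eeprom_checksum_ok(eeprom));
    return 0;
}
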
@@ -5314,14 +5313,14 @@ e1000_update_eeprom_checksum(struct e1000_hw *hw)
  * If e1000_update_eeprom_checksum is not called after this function, the
  * EEPROM will most likely contain an invalid checksum.
  *****************************************************************************/
-int32_t
+s32
 e1000_write_eeprom(struct e1000_hw *hw,
-                   uint16_t offset,
-                   uint16_t words,
-                   uint16_t *data)
+                   u16 offset,
+                   u16 words,
+                   u16 *data)
 {
     struct e1000_eeprom_info *eeprom = &hw->eeprom;
-    int32_t status = 0;
+    s32 status = 0;
 
     DEBUGFUNC("e1000_write_eeprom");
 
@@ -5339,7 +5338,7 @@ e1000_write_eeprom(struct e1000_hw *hw,
     }
 
     /* 82573 writes only through eewr */
-    if (eeprom->use_eewr == TRUE)
+    if (eeprom->use_eewr)
         return e1000_write_eeprom_eewr(hw, offset, words, data);
 
     if (eeprom->type == e1000_eeprom_ich8)
@@ -5371,19 +5370,19 @@ e1000_write_eeprom(struct e1000_hw *hw,
  * data - pointer to array of 8 bit words to be written to the EEPROM
  *
  *****************************************************************************/
-static int32_t
+static s32
 e1000_write_eeprom_spi(struct e1000_hw *hw,
-                       uint16_t offset,
-                       uint16_t words,
-                       uint16_t *data)
+                       u16 offset,
+                       u16 words,
+                       u16 *data)
 {
     struct e1000_eeprom_info *eeprom = &hw->eeprom;
-    uint16_t widx = 0;
+    u16 widx = 0;
 
     DEBUGFUNC("e1000_write_eeprom_spi");
 
     while (widx < words) {
-        uint8_t write_opcode = EEPROM_WRITE_OPCODE_SPI;
+        u8 write_opcode = EEPROM_WRITE_OPCODE_SPI;
 
         if (e1000_spi_eeprom_ready(hw)) return -E1000_ERR_EEPROM;
 
@@ -5402,14 +5401,14 @@ e1000_write_eeprom_spi(struct e1000_hw *hw,
         /* Send the Write command (8-bit opcode + addr) */
         e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits);
 
-        e1000_shift_out_ee_bits(hw, (uint16_t)((offset + widx)*2),
+        e1000_shift_out_ee_bits(hw, (u16)((offset + widx)*2),
                                 eeprom->address_bits);
 
         /* Send the data */
 
         /* Loop to allow for up to whole page write (32 bytes) of eeprom */
         while (widx < words) {
-            uint16_t word_out = data[widx];
+            u16 word_out = data[widx];
             word_out = (word_out >> 8) | (word_out << 8);
             e1000_shift_out_ee_bits(hw, word_out, 16);
             widx++;
@@ -5437,16 +5436,16 @@ e1000_write_eeprom_spi(struct e1000_hw *hw,
  * data - pointer to array of 16 bit words to be written to the EEPROM
  *
  *****************************************************************************/
-static int32_t
+static s32
 e1000_write_eeprom_microwire(struct e1000_hw *hw,
-                             uint16_t offset,
-                             uint16_t words,
-                             uint16_t *data)
+                             u16 offset,
+                             u16 words,
+                             u16 *data)
 {
     struct e1000_eeprom_info *eeprom = &hw->eeprom;
-    uint32_t eecd;
-    uint16_t words_written = 0;
-    uint16_t i = 0;
+    u32 eecd;
+    u16 words_written = 0;
+    u16 i = 0;
 
     DEBUGFUNC("e1000_write_eeprom_microwire");
 
@@ -5457,9 +5456,9 @@ e1000_write_eeprom_microwire(struct e1000_hw *hw,
      * EEPROM into write/erase mode.
      */
     e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE,
-                            (uint16_t)(eeprom->opcode_bits + 2));
+                            (u16)(eeprom->opcode_bits + 2));
 
-    e1000_shift_out_ee_bits(hw, 0, (uint16_t)(eeprom->address_bits - 2));
+    e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2));
 
     /* Prepare the EEPROM */
     e1000_standby_eeprom(hw);
@@ -5469,7 +5468,7 @@ e1000_write_eeprom_microwire(struct e1000_hw *hw,
         e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE,
                                 eeprom->opcode_bits);
 
-        e1000_shift_out_ee_bits(hw, (uint16_t)(offset + words_written),
+        e1000_shift_out_ee_bits(hw, (u16)(offset + words_written),
                                 eeprom->address_bits);
 
         /* Send the data */
@@ -5507,9 +5506,9 @@ e1000_write_eeprom_microwire(struct e1000_hw *hw,
      * EEPROM out of write/erase mode.
      */
     e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE,
-                            (uint16_t)(eeprom->opcode_bits + 2));
+                            (u16)(eeprom->opcode_bits + 2));
 
-    e1000_shift_out_ee_bits(hw, 0, (uint16_t)(eeprom->address_bits - 2));
+    e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2));
 
     return E1000_SUCCESS;
 }
@@ -5524,19 +5523,19 @@ e1000_write_eeprom_microwire(struct e1000_hw *hw,
  * data - word read from the EEPROM
  * words - number of words to read
  *****************************************************************************/
-static int32_t
+static s32
 e1000_commit_shadow_ram(struct e1000_hw *hw)
 {
-    uint32_t attempts = 100000;
-    uint32_t eecd = 0;
-    uint32_t flop = 0;
-    uint32_t i = 0;
-    int32_t error = E1000_SUCCESS;
-    uint32_t old_bank_offset = 0;
-    uint32_t new_bank_offset = 0;
-    uint8_t low_byte = 0;
-    uint8_t high_byte = 0;
-    boolean_t sector_write_failed = FALSE;
+    u32 attempts = 100000;
+    u32 eecd = 0;
+    u32 flop = 0;
+    u32 i = 0;
+    s32 error = E1000_SUCCESS;
+    u32 old_bank_offset = 0;
+    u32 new_bank_offset = 0;
+    u8 low_byte = 0;
+    u8 high_byte = 0;
+    bool sector_write_failed = false;
 
     if (hw->mac_type == e1000_82573) {
         /* The flop register will be used to determine if flash type is STM */
@@ -5588,24 +5587,24 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
             e1000_erase_ich8_4k_segment(hw, 0);
         }
 
-        sector_write_failed = FALSE;
+        sector_write_failed = false;
         /* Loop for every byte in the shadow RAM,
          * which is in units of words. */
         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
             /* Determine whether to write the value stored
              * in the other NVM bank or a modified value stored
              * in the shadow RAM */
-            if (hw->eeprom_shadow_ram[i].modified == TRUE) {
-                low_byte = (uint8_t)hw->eeprom_shadow_ram[i].eeprom_word;
+            if (hw->eeprom_shadow_ram[i].modified) {
+                low_byte = (u8)hw->eeprom_shadow_ram[i].eeprom_word;
                 udelay(100);
                 error = e1000_verify_write_ich8_byte(hw,
                             (i << 1) + new_bank_offset, low_byte);
 
                 if (error != E1000_SUCCESS)
-                    sector_write_failed = TRUE;
+                    sector_write_failed = true;
                 else {
                     high_byte =
-                        (uint8_t)(hw->eeprom_shadow_ram[i].eeprom_word >> 8);
+                        (u8)(hw->eeprom_shadow_ram[i].eeprom_word >> 8);
                     udelay(100);
                 }
             } else {
@@ -5616,7 +5615,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
                             (i << 1) + new_bank_offset, low_byte);
 
                 if (error != E1000_SUCCESS)
-                    sector_write_failed = TRUE;
+                    sector_write_failed = true;
                 else {
                     e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset + 1,
                                          &high_byte);
@@ -5624,10 +5623,10 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
                 }
             }
 
-            /* If the write of the low byte was successful, go ahread and
+            /* If the write of the low byte was successful, go ahead and
              * write the high byte while checking to make sure that if it
              * is the signature byte, then it is handled properly */
-            if (sector_write_failed == FALSE) {
+            if (!sector_write_failed) {
                 /* If the word is 0x13, then make sure the signature bits
                  * (15:14) are 11b until the commit has completed.
                  * This will allow us to write 10b which indicates the
@@ -5640,7 +5639,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
                 error = e1000_verify_write_ich8_byte(hw,
                             (i << 1) + new_bank_offset + 1, high_byte);
                 if (error != E1000_SUCCESS)
-                    sector_write_failed = TRUE;
+                    sector_write_failed = true;
 
             } else {
                 /* If the write failed then break from the loop and
@@ -5651,7 +5650,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
 
         /* Don't bother writing the segment valid bits if sector
          * programming failed. */
-        if (sector_write_failed == FALSE) {
+        if (!sector_write_failed) {
            /* Finally validate the new segment by setting bits 15:14
             * to 10b in word 0x13; this can be done without an
              * erase as well since these bits are 11 to start with
@@ -5673,7 +5672,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
 
             /* Clear the now not used entry in the cache */
             for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
-                hw->eeprom_shadow_ram[i].modified = FALSE;
+                hw->eeprom_shadow_ram[i].modified = false;
                 hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
             }
         }
@@ -5688,11 +5687,11 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
  *
  * hw - Struct containing variables accessed by shared code
  *****************************************************************************/
-int32_t
+s32
 e1000_read_mac_addr(struct e1000_hw * hw)
 {
-    uint16_t offset;
-    uint16_t eeprom_data, i;
+    u16 offset;
+    u16 eeprom_data, i;
 
     DEBUGFUNC("e1000_read_mac_addr");
 
@@ -5702,8 +5701,8 @@ e1000_read_mac_addr(struct e1000_hw * hw)
             DEBUGOUT("EEPROM Read Error\n");
             return -E1000_ERR_EEPROM;
         }
-        hw->perm_mac_addr[i] = (uint8_t) (eeprom_data & 0x00FF);
-        hw->perm_mac_addr[i+1] = (uint8_t) (eeprom_data >> 8);
+        hw->perm_mac_addr[i] = (u8) (eeprom_data & 0x00FF);
+        hw->perm_mac_addr[i+1] = (u8) (eeprom_data >> 8);
     }
 
     switch (hw->mac_type) {
@@ -5735,8 +5734,8 @@ e1000_read_mac_addr(struct e1000_hw * hw)
 static void
 e1000_init_rx_addrs(struct e1000_hw *hw)
 {
-    uint32_t i;
-    uint32_t rar_num;
+    u32 i;
+    u32 rar_num;
 
     DEBUGFUNC("e1000_init_rx_addrs");
 
@@ -5750,7 +5749,7 @@ e1000_init_rx_addrs(struct e1000_hw *hw)
     /* Reserve a spot for the Locally Administered Address to work around
      * an 82571 issue in which a reset on one port will reload the MAC on
      * the other port. */
-    if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE))
+    if ((hw->mac_type == e1000_82571) && (hw->laa_is_present))
         rar_num -= 1;
     if (hw->mac_type == e1000_ich8lan)
         rar_num = E1000_RAR_ENTRIES_ICH8LAN;
@@ -5771,11 +5770,11 @@ e1000_init_rx_addrs(struct e1000_hw *hw)
  * hw - Struct containing variables accessed by shared code
  * mc_addr - the multicast address to hash
  *****************************************************************************/
-uint32_t
+u32
 e1000_hash_mc_addr(struct e1000_hw *hw,
-                   uint8_t *mc_addr)
+                   u8 *mc_addr)
 {
-    uint32_t hash_value = 0;
+    u32 hash_value = 0;
 
     /* The portion of the address that is used for the hash table is
      * determined by the mc_filter_type setting.
@@ -5788,37 +5787,37 @@ e1000_hash_mc_addr(struct e1000_hw *hw,
     case 0:
         if (hw->mac_type == e1000_ich8lan) {
             /* [47:38] i.e. 0x158 for above example address */
-            hash_value = ((mc_addr[4] >> 6) | (((uint16_t) mc_addr[5]) << 2));
+            hash_value = ((mc_addr[4] >> 6) | (((u16) mc_addr[5]) << 2));
         } else {
             /* [47:36] i.e. 0x563 for above example address */
-            hash_value = ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4));
+            hash_value = ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4));
         }
         break;
     case 1:
         if (hw->mac_type == e1000_ich8lan) {
             /* [46:37] i.e. 0x2B1 for above example address */
-            hash_value = ((mc_addr[4] >> 5) | (((uint16_t) mc_addr[5]) << 3));
+            hash_value = ((mc_addr[4] >> 5) | (((u16) mc_addr[5]) << 3));
         } else {
             /* [46:35] i.e. 0xAC6 for above example address */
-            hash_value = ((mc_addr[4] >> 3) | (((uint16_t) mc_addr[5]) << 5));
+            hash_value = ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5));
         }
         break;
     case 2:
         if (hw->mac_type == e1000_ich8lan) {
             /*[45:36] i.e. 0x163 for above example address */
-            hash_value = ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4));
+            hash_value = ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4));
         } else {
             /* [45:34] i.e. 0x5D8 for above example address */
-            hash_value = ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6));
+            hash_value = ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6));
         }
         break;
     case 3:
         if (hw->mac_type == e1000_ich8lan) {
             /* [43:34] i.e. 0x18D for above example address */
-            hash_value = ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6));
+            hash_value = ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6));
         } else {
             /* [43:32] i.e. 0x634 for above example address */
-            hash_value = ((mc_addr[4]) | (((uint16_t) mc_addr[5]) << 8));
+            hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8));
         }
         break;
     }
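
The switch above selects a different window of address bits depending on mc_filter_type. The sketch below reproduces the four non-ICH8 selections verbatim and masks the result to 12 bits, since (per the e1000_mta_set comment further down) the multicast table is treated as an array of 4096 bits; the ICH8 variants differ only in using narrower windows.

#include <stdint.h>
#include <stdio.h>

/* The four non-ICH8 bit windows from the switch above, masked down to the
 * 4096-entry (12-bit) multicast table index consumed by e1000_mta_set(). */
static uint32_t hash_mc_addr(const uint8_t mc_addr[6], int mc_filter_type)
{
    uint32_t hash_value;

    switch (mc_filter_type) {
    case 0:  /* bits [47:36] of the address */
        hash_value = (mc_addr[4] >> 4) | ((uint32_t)mc_addr[5] << 4);
        break;
    case 1:  /* bits [46:35] */
        hash_value = (mc_addr[4] >> 3) | ((uint32_t)mc_addr[5] << 5);
        break;
    case 2:  /* bits [45:34] */
        hash_value = (mc_addr[4] >> 2) | ((uint32_t)mc_addr[5] << 6);
        break;
    default: /* case 3: bits [43:32] */
        hash_value = mc_addr[4] | ((uint32_t)mc_addr[5] << 8);
        break;
    }
    return hash_value & 0xFFF;   /* 128 registers x 32 bits = 4096 hash bits */
}

int main(void)
{
    const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x34, 0x56 };
    int type;

    for (type = 0; type < 4; type++)
        printf("mc_filter_type %d -> hash 0x%03x\n",
               type, (unsigned)hash_mc_addr(mc, type));
    return 0;
}
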
@@ -5838,11 +5837,11 @@ e1000_hash_mc_addr(struct e1000_hw *hw,
  *****************************************************************************/
 void
 e1000_mta_set(struct e1000_hw *hw,
-              uint32_t hash_value)
+              u32 hash_value)
 {
-    uint32_t hash_bit, hash_reg;
-    uint32_t mta;
-    uint32_t temp;
+    u32 hash_bit, hash_reg;
+    u32 mta;
+    u32 temp;
 
     /* The MTA is a register array of 128 32-bit registers.
      * It is treated like an array of 4096 bits.  We want to set
@@ -5887,18 +5886,18 @@ e1000_mta_set(struct e1000_hw *hw,
  *****************************************************************************/
 void
 e1000_rar_set(struct e1000_hw *hw,
-              uint8_t *addr,
-              uint32_t index)
+              u8 *addr,
+              u32 index)
 {
-    uint32_t rar_low, rar_high;
+    u32 rar_low, rar_high;
 
     /* HW expects these in little endian so we reverse the byte order
      * from network order (big endian) to little endian
      */
-    rar_low = ((uint32_t) addr[0] |
-               ((uint32_t) addr[1] << 8) |
-               ((uint32_t) addr[2] << 16) | ((uint32_t) addr[3] << 24));
-    rar_high = ((uint32_t) addr[4] | ((uint32_t) addr[5] << 8));
+    rar_low = ((u32) addr[0] |
+               ((u32) addr[1] << 8) |
+               ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+    rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
 
     /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx
      * unit hang.
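
A standalone version of the byte-order packing done above: a network-order MAC address is reversed into the little-endian RAL/RAH register pair. The "address valid" flag mentioned a few lines below is assumed to be bit 31 of the high word; that constant does not appear in this hunk.

#include <stdint.h>
#include <stdio.h>

/* Pack a MAC address into the two receive-address register words the same
 * way e1000_rar_set() does: byte-reverse from network (big-endian) order. */
static void rar_pack(const uint8_t addr[6], uint32_t *rar_low, uint32_t *rar_high)
{
    *rar_low = (uint32_t)addr[0] |
               ((uint32_t)addr[1] << 8) |
               ((uint32_t)addr[2] << 16) |
               ((uint32_t)addr[3] << 24);
    *rar_high = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);
    /* The driver then ORs an "address valid" flag into rar_high (assumed to
     * be bit 31), except on the parts that leave the AV bit off. */
}

int main(void)
{
    const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0x0a, 0x0b, 0x0c };
    uint32_t lo, hi;

    rar_pack(mac, &lo, &hi);
    printf("RAL = 0x%08x, RAH = 0x%08x\n",   /* expect 0x0a211b00, 0x00000c0b */
           (unsigned)lo, (unsigned)hi);
    return 0;
}
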
@@ -5922,7 +5921,7 @@ e1000_rar_set(struct e1000_hw *hw,
     case e1000_82571:
     case e1000_82572:
     case e1000_80003es2lan:
-        if (hw->leave_av_bit_off == TRUE)
+        if (hw->leave_av_bit_off)
             break;
     default:
         /* Indicate to hardware the Address is Valid. */
@@ -5945,10 +5944,10 @@ e1000_rar_set(struct e1000_hw *hw,
  *****************************************************************************/
 void
 e1000_write_vfta(struct e1000_hw *hw,
-                 uint32_t offset,
-                 uint32_t value)
+                 u32 offset,
+                 u32 value)
 {
-    uint32_t temp;
+    u32 temp;
 
     if (hw->mac_type == e1000_ich8lan)
         return;
@@ -5973,10 +5972,10 @@ e1000_write_vfta(struct e1000_hw *hw,
 static void
 e1000_clear_vfta(struct e1000_hw *hw)
 {
-    uint32_t offset;
-    uint32_t vfta_value = 0;
-    uint32_t vfta_offset = 0;
-    uint32_t vfta_bit_in_reg = 0;
+    u32 offset;
+    u32 vfta_value = 0;
+    u32 vfta_offset = 0;
+    u32 vfta_bit_in_reg = 0;
 
     if (hw->mac_type == e1000_ich8lan)
         return;
@@ -6004,15 +6003,15 @@ e1000_clear_vfta(struct e1000_hw *hw)
     }
 }
 
-static int32_t
+static s32
 e1000_id_led_init(struct e1000_hw * hw)
 {
-    uint32_t ledctl;
-    const uint32_t ledctl_mask = 0x000000FF;
-    const uint32_t ledctl_on = E1000_LEDCTL_MODE_LED_ON;
-    const uint32_t ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
-    uint16_t eeprom_data, i, temp;
-    const uint16_t led_mask = 0x0F;
+    u32 ledctl;
+    const u32 ledctl_mask = 0x000000FF;
+    const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
+    const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
+    u16 eeprom_data, i, temp;
+    const u16 led_mask = 0x0F;
 
     DEBUGFUNC("e1000_id_led_init");
 
@@ -6087,11 +6086,11 @@ e1000_id_led_init(struct e1000_hw * hw)
  *
  * hw - Struct containing variables accessed by shared code
  *****************************************************************************/
-int32_t
+s32
 e1000_setup_led(struct e1000_hw *hw)
 {
-    uint32_t ledctl;
-    int32_t ret_val = E1000_SUCCESS;
+    u32 ledctl;
+    s32 ret_val = E1000_SUCCESS;
 
     DEBUGFUNC("e1000_setup_led");
 
@@ -6112,7 +6111,7 @@ e1000_setup_led(struct e1000_hw *hw)
         if (ret_val)
             return ret_val;
         ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
-                                      (uint16_t)(hw->phy_spd_default &
+                                      (u16)(hw->phy_spd_default &
                                       ~IGP01E1000_GMII_SPD));
         if (ret_val)
             return ret_val;
@@ -6146,11 +6145,11 @@ e1000_setup_led(struct e1000_hw *hw)
  *
  * hw - Struct containing variables accessed by shared code
  *****************************************************************************/
-int32_t
+s32
 e1000_blink_led_start(struct e1000_hw *hw)
 {
-    int16_t  i;
-    uint32_t ledctl_blink = 0;
+    s16  i;
+    u32 ledctl_blink = 0;
 
     DEBUGFUNC("e1000_id_led_blink_on");
 
@@ -6181,10 +6180,10 @@ e1000_blink_led_start(struct e1000_hw *hw)
  *
  * hw - Struct containing variables accessed by shared code
  *****************************************************************************/
-int32_t
+s32
 e1000_cleanup_led(struct e1000_hw *hw)
 {
-    int32_t ret_val = E1000_SUCCESS;
+    s32 ret_val = E1000_SUCCESS;
 
     DEBUGFUNC("e1000_cleanup_led");
 
@@ -6223,10 +6222,10 @@ e1000_cleanup_led(struct e1000_hw *hw)
  *
  * hw - Struct containing variables accessed by shared code
  *****************************************************************************/
-int32_t
+s32
 e1000_led_on(struct e1000_hw *hw)
 {
-    uint32_t ctrl = E1000_READ_REG(hw, CTRL);
+    u32 ctrl = E1000_READ_REG(hw, CTRL);
 
     DEBUGFUNC("e1000_led_on");
 
@@ -6274,10 +6273,10 @@ e1000_led_on(struct e1000_hw *hw)
  *
  * hw - Struct containing variables accessed by shared code
  *****************************************************************************/
-int32_t
+s32
 e1000_led_off(struct e1000_hw *hw)
 {
-    uint32_t ctrl = E1000_READ_REG(hw, CTRL);
+    u32 ctrl = E1000_READ_REG(hw, CTRL);
 
     DEBUGFUNC("e1000_led_off");
 
@@ -6328,7 +6327,7 @@ e1000_led_off(struct e1000_hw *hw)
 static void
 e1000_clear_hw_cntrs(struct e1000_hw *hw)
 {
-    volatile uint32_t temp;
+    volatile u32 temp;
 
     temp = E1000_READ_REG(hw, CRCERRS);
     temp = E1000_READ_REG(hw, SYMERRS);
@@ -6425,7 +6424,7 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw)
  * hw - Struct containing variables accessed by shared code
  *
  * Call this after e1000_init_hw. You may override the IFS defaults by setting
- * hw->ifs_params_forced to TRUE. However, you must initialize hw->
+ * hw->ifs_params_forced to true. However, you must initialize hw->
  * current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio
  * before calling this function.
  *****************************************************************************/
@@ -6442,7 +6441,7 @@ e1000_reset_adaptive(struct e1000_hw *hw)
             hw->ifs_step_size = IFS_STEP;
             hw->ifs_ratio = IFS_RATIO;
         }
-        hw->in_ifs_mode = FALSE;
+        hw->in_ifs_mode = false;
         E1000_WRITE_REG(hw, AIT, 0);
     } else {
         DEBUGOUT("Not in Adaptive IFS mode!\n");
@@ -6465,7 +6464,7 @@ e1000_update_adaptive(struct e1000_hw *hw)
     if (hw->adaptive_ifs) {
         if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) {
             if (hw->tx_packet_delta > MIN_NUM_XMITS) {
-                hw->in_ifs_mode = TRUE;
+                hw->in_ifs_mode = true;
                 if (hw->current_ifs_val < hw->ifs_max_val) {
                     if (hw->current_ifs_val == 0)
                         hw->current_ifs_val = hw->ifs_min_val;
@@ -6477,7 +6476,7 @@ e1000_update_adaptive(struct e1000_hw *hw)
         } else {
             if (hw->in_ifs_mode && (hw->tx_packet_delta <= MIN_NUM_XMITS)) {
                 hw->current_ifs_val = 0;
-                hw->in_ifs_mode = FALSE;
+                hw->in_ifs_mode = false;
                 E1000_WRITE_REG(hw, AIT, 0);
             }
         }
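
A userspace model of the adaptive-IFS decision visible in this function: when collisions outpace transmitted packets by more than ifs_ratio and traffic is above the MIN_NUM_XMITS floor, the throttle value steps from ifs_min_val toward ifs_max_val; when traffic drops back to the floor or below, it is cleared. The numeric defaults below are made-up stand-ins, and the AIT register write is reduced to a comment.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Only the decision structure is taken from the driver; min/max/step/ratio
 * and min_xmits stand in for IFS_MIN/IFS_MAX/IFS_STEP/IFS_RATIO/MIN_NUM_XMITS. */
struct ifs_state {
    bool     in_ifs_mode;
    uint16_t current, min, max, step, ratio, min_xmits;
};

static void ifs_update(struct ifs_state *s, uint32_t collisions, uint32_t tx_packets)
{
    if (collisions * s->ratio > tx_packets) {
        if (tx_packets > s->min_xmits) {
            /* Collisions dominate: enter IFS mode and pad the gap further. */
            s->in_ifs_mode = true;
            if (s->current < s->max)
                s->current = s->current ? s->current + s->step : s->min;
            /* the driver writes the new value to the AIT register here */
        }
    } else if (s->in_ifs_mode && tx_packets <= s->min_xmits) {
        /* Load has died down: stop padding and clear AIT. */
        s->current = 0;
        s->in_ifs_mode = false;
    }
}

int main(void)
{
    struct ifs_state s = { false, 0, 40, 80, 10, 4, 100 };

    ifs_update(&s, 200, 300);  /* heavy collisions: throttle engages at min */
    printf("in_ifs_mode=%d current=%u\n", (int)s.in_ifs_mode, (unsigned)s.current);
    ifs_update(&s, 0, 50);     /* light load: throttle released             */
    printf("in_ifs_mode=%d current=%u\n", (int)s.in_ifs_mode, (unsigned)s.current);
    return 0;
}
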
@@ -6496,10 +6495,10 @@ e1000_update_adaptive(struct e1000_hw *hw)
 void
 e1000_tbi_adjust_stats(struct e1000_hw *hw,
                        struct e1000_hw_stats *stats,
-                       uint32_t frame_len,
-                       uint8_t *mac_addr)
+                       u32 frame_len,
+                       u8 *mac_addr)
 {
-    uint64_t carry_bit;
+    u64 carry_bit;
 
     /* First adjust the frame length. */
     frame_len--;
@@ -6528,7 +6527,7 @@ e1000_tbi_adjust_stats(struct e1000_hw *hw,
      * since the test for a multicast frame will test positive on
      * a broadcast frame.
      */
-    if ((mac_addr[0] == (uint8_t) 0xff) && (mac_addr[1] == (uint8_t) 0xff))
+    if ((mac_addr[0] == (u8) 0xff) && (mac_addr[1] == (u8) 0xff))
         /* Broadcast packet */
         stats->bprc++;
     else if (*mac_addr & 0x01)
@@ -6574,9 +6573,9 @@ e1000_tbi_adjust_stats(struct e1000_hw *hw,
 void
 e1000_get_bus_info(struct e1000_hw *hw)
 {
-    int32_t ret_val;
-    uint16_t pci_ex_link_status;
-    uint32_t status;
+    s32 ret_val;
+    u16 pci_ex_link_status;
+    u32 status;
 
     switch (hw->mac_type) {
     case e1000_82542_rev2_0:
@@ -6648,8 +6647,8 @@ e1000_get_bus_info(struct e1000_hw *hw)
  *****************************************************************************/
 static void
 e1000_write_reg_io(struct e1000_hw *hw,
-                   uint32_t offset,
-                   uint32_t value)
+                   u32 offset,
+                   u32 value)
 {
     unsigned long io_addr = hw->io_base;
     unsigned long io_data = hw->io_base + 4;
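
The two locals above set up a classic index/data pair: the register offset goes to the base I/O port and the value to base + 4. A sketch with outl() replaced by a printing stub; the write order and the IOADDR/IODATA nicknames are assumptions, since the rest of the function body is not shown in this hunk.

#include <stdint.h>
#include <stdio.h>

/* Recording stub standing in for outl(), so the sketch runs anywhere. */
static void outl_stub(uint32_t value, unsigned long port)
{
    printf("outl(0x%08x, 0x%lx)\n", (unsigned)value, port);
}

/* Model of the index/data access: offset to the base port, value to base + 4. */
static void write_reg_io(unsigned long io_base, uint32_t offset, uint32_t value)
{
    unsigned long io_addr = io_base;       /* index ("IOADDR") register */
    unsigned long io_data = io_base + 4;   /* data  ("IODATA") register */

    outl_stub(offset, io_addr);
    outl_stub(value, io_data);
}

int main(void)
{
    write_reg_io(0xd000, 0x0, 0x04010000); /* hypothetical base and values */
    return 0;
}
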
@@ -6673,15 +6672,15 @@ e1000_write_reg_io(struct e1000_hw *hw,
  * register to the minimum and maximum range.
  * For IGP phy's, the function calculates the range by the AGC registers.
  *****************************************************************************/
-static int32_t
+static s32
 e1000_get_cable_length(struct e1000_hw *hw,
-                       uint16_t *min_length,
-                       uint16_t *max_length)
+                       u16 *min_length,
+                       u16 *max_length)
 {
-    int32_t ret_val;
-    uint16_t agc_value = 0;
-    uint16_t i, phy_data;
-    uint16_t cable_length;
+    s32 ret_val;
+    u16 agc_value = 0;
+    u16 i, phy_data;
+    u16 cable_length;
 
     DEBUGFUNC("e1000_get_cable_length");
 
@@ -6752,9 +6751,9 @@ e1000_get_cable_length(struct e1000_hw *hw,
             break;
         }
     } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
-        uint16_t cur_agc_value;
-        uint16_t min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
-        uint16_t agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
+        u16 cur_agc_value;
+        u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
+        u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
                                                          {IGP01E1000_PHY_AGC_A,
                                                           IGP01E1000_PHY_AGC_B,
                                                           IGP01E1000_PHY_AGC_C,
@@ -6800,9 +6799,9 @@ e1000_get_cable_length(struct e1000_hw *hw,
                       IGP01E1000_AGC_RANGE;
     } else if (hw->phy_type == e1000_phy_igp_2 ||
                hw->phy_type == e1000_phy_igp_3) {
-        uint16_t cur_agc_index, max_agc_index = 0;
-        uint16_t min_agc_index = IGP02E1000_AGC_LENGTH_TABLE_SIZE - 1;
-        uint16_t agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
+        u16 cur_agc_index, max_agc_index = 0;
+        u16 min_agc_index = IGP02E1000_AGC_LENGTH_TABLE_SIZE - 1;
+        u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
                                                          {IGP02E1000_PHY_AGC_A,
                                                           IGP02E1000_PHY_AGC_B,
                                                           IGP02E1000_PHY_AGC_C,
@@ -6864,12 +6863,12 @@ e1000_get_cable_length(struct e1000_hw *hw,
  * return 0.  If the link speed is 1000 Mbps the polarity status is in the
  * IGP01E1000_PHY_PCS_INIT_REG.
  *****************************************************************************/
-static int32_t
+static s32
 e1000_check_polarity(struct e1000_hw *hw,
                      e1000_rev_polarity *polarity)
 {
-    int32_t ret_val;
-    uint16_t phy_data;
+    s32 ret_val;
+    u16 phy_data;
 
     DEBUGFUNC("e1000_check_polarity");
 
@@ -6940,11 +6939,11 @@ e1000_check_polarity(struct e1000_hw *hw,
  * Link Health register.  In IGP this bit is latched high, so the driver must
  * read it immediately after link is established.
  *****************************************************************************/
-static int32_t
+static s32
 e1000_check_downshift(struct e1000_hw *hw)
 {
-    int32_t ret_val;
-    uint16_t phy_data;
+    s32 ret_val;
+    u16 phy_data;
 
     DEBUGFUNC("e1000_check_downshift");
 
@@ -6968,7 +6967,7 @@ e1000_check_downshift(struct e1000_hw *hw)
                                M88E1000_PSSR_DOWNSHIFT_SHIFT;
     } else if (hw->phy_type == e1000_phy_ife) {
         /* e1000_phy_ife supports 10/100 speed only */
-        hw->speed_downgraded = FALSE;
+        hw->speed_downgraded = false;
     }
 
     return E1000_SUCCESS;
@@ -6986,18 +6985,18 @@ e1000_check_downshift(struct e1000_hw *hw)
  *
  ****************************************************************************/
 
-static int32_t
+static s32
 e1000_config_dsp_after_link_change(struct e1000_hw *hw,
-                                   boolean_t link_up)
+                                   bool link_up)
 {
-    int32_t ret_val;
-    uint16_t phy_data, phy_saved_data, speed, duplex, i;
-    uint16_t dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
+    s32 ret_val;
+    u16 phy_data, phy_saved_data, speed, duplex, i;
+    u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
                                         {IGP01E1000_PHY_AGC_PARAM_A,
                                         IGP01E1000_PHY_AGC_PARAM_B,
                                         IGP01E1000_PHY_AGC_PARAM_C,
                                         IGP01E1000_PHY_AGC_PARAM_D};
-    uint16_t min_length, max_length;
+    u16 min_length, max_length;
 
     DEBUGFUNC("e1000_config_dsp_after_link_change");
 
@@ -7039,8 +7038,8 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
             if ((hw->ffe_config_state == e1000_ffe_config_enabled) &&
                (min_length < e1000_igp_cable_length_50)) {
 
-                uint16_t ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20;
-                uint32_t idle_errs = 0;
+                u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20;
+                u32 idle_errs = 0;
 
                 /* clear previous idle error counts */
                 ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
@@ -7174,11 +7173,11 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
  *
  * hw - Struct containing variables accessed by shared code
  ****************************************************************************/
-static int32_t
+static s32
 e1000_set_phy_mode(struct e1000_hw *hw)
 {
-    int32_t ret_val;
-    uint16_t eeprom_data;
+    s32 ret_val;
+    u16 eeprom_data;
 
     DEBUGFUNC("e1000_set_phy_mode");
 
@@ -7198,7 +7197,7 @@ e1000_set_phy_mode(struct e1000_hw *hw)
             if (ret_val)
                 return ret_val;
 
-            hw->phy_reset_disable = FALSE;
+            hw->phy_reset_disable = false;
         }
     }
 
@@ -7219,13 +7218,13 @@ e1000_set_phy_mode(struct e1000_hw *hw)
  *
  ****************************************************************************/
 
-static int32_t
+static s32
 e1000_set_d3_lplu_state(struct e1000_hw *hw,
-                        boolean_t active)
+                        bool active)
 {
-    uint32_t phy_ctrl = 0;
-    int32_t ret_val;
-    uint16_t phy_data;
+    u32 phy_ctrl = 0;
+    s32 ret_val;
+    u16 phy_data;
     DEBUGFUNC("e1000_set_d3_lplu_state");
 
     if (hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2
@@ -7349,13 +7348,13 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
  *
  ****************************************************************************/
 
-static int32_t
+static s32
 e1000_set_d0_lplu_state(struct e1000_hw *hw,
-                        boolean_t active)
+                        bool active)
 {
-    uint32_t phy_ctrl = 0;
-    int32_t ret_val;
-    uint16_t phy_data;
+    u32 phy_ctrl = 0;
+    s32 ret_val;
+    u16 phy_data;
     DEBUGFUNC("e1000_set_d0_lplu_state");
 
     if (hw->mac_type <= e1000_82547_rev_2)
@@ -7440,12 +7439,12 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw,
  *
  * hw - Struct containing variables accessed by shared code
  *****************************************************************************/
-static int32_t
+static s32
 e1000_set_vco_speed(struct e1000_hw *hw)
 {
-    int32_t  ret_val;
-    uint16_t default_page = 0;
-    uint16_t phy_data;
+    s32  ret_val;
+    u16 default_page = 0;
+    u16 phy_data;
 
     DEBUGFUNC("e1000_set_vco_speed");
 
@@ -7504,18 +7503,18 @@ e1000_set_vco_speed(struct e1000_hw *hw)
  *
  * returns: - E1000_SUCCESS .
  ****************************************************************************/
-static int32_t
-e1000_host_if_read_cookie(struct e1000_hw * hw, uint8_t *buffer)
+static s32
+e1000_host_if_read_cookie(struct e1000_hw * hw, u8 *buffer)
 {
-    uint8_t i;
-    uint32_t offset = E1000_MNG_DHCP_COOKIE_OFFSET;
-    uint8_t length = E1000_MNG_DHCP_COOKIE_LENGTH;
+    u8 i;
+    u32 offset = E1000_MNG_DHCP_COOKIE_OFFSET;
+    u8 length = E1000_MNG_DHCP_COOKIE_LENGTH;
 
     length = (length >> 2);
     offset = (offset >> 2);
 
     for (i = 0; i < length; i++) {
-        *((uint32_t *) buffer + i) =
+        *((u32 *) buffer + i) =
             E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset + i);
     }
     return E1000_SUCCESS;
@@ -7531,11 +7530,11 @@ e1000_host_if_read_cookie(struct e1000_hw * hw, uint8_t *buffer)
  *            timeout
  *          - E1000_SUCCESS for success.
  ****************************************************************************/
-static int32_t
+static s32
 e1000_mng_enable_host_if(struct e1000_hw * hw)
 {
-    uint32_t hicr;
-    uint8_t i;
+    u32 hicr;
+    u8 i;
 
     /* Check that the host interface is enabled. */
     hicr = E1000_READ_REG(hw, HICR);
@@ -7565,14 +7564,14 @@ e1000_mng_enable_host_if(struct e1000_hw * hw)
  *
  * returns  - E1000_SUCCESS for success.
  ****************************************************************************/
-static int32_t
-e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer,
-                        uint16_t length, uint16_t offset, uint8_t *sum)
+static s32
+e1000_mng_host_if_write(struct e1000_hw * hw, u8 *buffer,
+                        u16 length, u16 offset, u8 *sum)
 {
-    uint8_t *tmp;
-    uint8_t *bufptr = buffer;
-    uint32_t data = 0;
-    uint16_t remaining, i, j, prev_bytes;
+    u8 *tmp;
+    u8 *bufptr = buffer;
+    u32 data = 0;
+    u16 remaining, i, j, prev_bytes;
 
     /* sum = only sum of the data and it is not checksum */
 
@@ -7580,14 +7579,14 @@ e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer,
         return -E1000_ERR_PARAM;
     }
 
-    tmp = (uint8_t *)&data;
+    tmp = (u8 *)&data;
     prev_bytes = offset & 0x3;
     offset &= 0xFFFC;
     offset >>= 2;
 
     if (prev_bytes) {
         data = E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset);
-        for (j = prev_bytes; j < sizeof(uint32_t); j++) {
+        for (j = prev_bytes; j < sizeof(u32); j++) {
             *(tmp + j) = *bufptr++;
             *sum += *(tmp + j);
         }
@@ -7605,7 +7604,7 @@ e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer,
     /* The device driver writes the relevant command block into the
      * ram area. */
     for (i = 0; i < length; i++) {
-        for (j = 0; j < sizeof(uint32_t); j++) {
+        for (j = 0; j < sizeof(u32); j++) {
             *(tmp + j) = *bufptr++;
             *sum += *(tmp + j);
         }
@@ -7613,7 +7612,7 @@ e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer,
         E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data);
     }
     if (remaining) {
-        for (j = 0; j < sizeof(uint32_t); j++) {
+        for (j = 0; j < sizeof(u32); j++) {
             if (j < remaining)
                 *(tmp + j) = *bufptr++;
             else
@@ -7633,23 +7632,23 @@ e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer,
  *
  * returns  - E1000_SUCCESS for success.
  ****************************************************************************/
-static int32_t
+static s32
 e1000_mng_write_cmd_header(struct e1000_hw * hw,
                            struct e1000_host_mng_command_header * hdr)
 {
-    uint16_t i;
-    uint8_t sum;
-    uint8_t *buffer;
+    u16 i;
+    u8 sum;
+    u8 *buffer;
 
     /* Write the whole command header structure which includes sum of
      * the buffer */
 
-    uint16_t length = sizeof(struct e1000_host_mng_command_header);
+    u16 length = sizeof(struct e1000_host_mng_command_header);
 
     sum = hdr->checksum;
     hdr->checksum = 0;
 
-    buffer = (uint8_t *) hdr;
+    buffer = (u8 *) hdr;
     i = length;
     while (i--)
         sum += buffer[i];
@@ -7659,7 +7658,7 @@ e1000_mng_write_cmd_header(struct e1000_hw * hw,
     length >>= 2;
     /* The device driver writes the relevant command block into the ram area. */
     for (i = 0; i < length; i++) {
-        E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((uint32_t *) hdr + i));
+        E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((u32 *) hdr + i));
         E1000_WRITE_FLUSH(hw);
     }
 
@@ -7673,10 +7672,10 @@ e1000_mng_write_cmd_header(struct e1000_hw * hw,
  *
  * returns  - E1000_SUCCESS for success.
  ****************************************************************************/
-static int32_t
+static s32
 e1000_mng_write_commit(struct e1000_hw * hw)
 {
-    uint32_t hicr;
+    u32 hicr;
 
     hicr = E1000_READ_REG(hw, HICR);
     /* Setting this bit tells the ARC that a new command is pending. */
@@ -7689,35 +7688,35 @@ e1000_mng_write_commit(struct e1000_hw * hw)
 /*****************************************************************************
  * This function checks the mode of the firmware.
  *
- * returns  - TRUE when the mode is IAMT or FALSE.
+ * returns  - true when the mode is IAMT or false.
  ****************************************************************************/
-boolean_t
+bool
 e1000_check_mng_mode(struct e1000_hw *hw)
 {
-    uint32_t fwsm;
+    u32 fwsm;
 
     fwsm = E1000_READ_REG(hw, FWSM);
 
     if (hw->mac_type == e1000_ich8lan) {
         if ((fwsm & E1000_FWSM_MODE_MASK) ==
             (E1000_MNG_ICH_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
-            return TRUE;
+            return true;
     } else if ((fwsm & E1000_FWSM_MODE_MASK) ==
                (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
-        return TRUE;
+        return true;
 
-    return FALSE;
+    return false;
 }
 
 
 /*****************************************************************************
  * This function writes the dhcp info .
  ****************************************************************************/
-int32_t
-e1000_mng_write_dhcp_info(struct e1000_hw * hw, uint8_t *buffer,
-                          uint16_t length)
+s32
+e1000_mng_write_dhcp_info(struct e1000_hw * hw, u8 *buffer,
+                          u16 length)
 {
-    int32_t ret_val;
+    s32 ret_val;
     struct e1000_host_mng_command_header hdr;
 
     hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
@@ -7745,11 +7744,11 @@ e1000_mng_write_dhcp_info(struct e1000_hw * hw, uint8_t *buffer,
  *
  * returns  - checksum of buffer contents.
  ****************************************************************************/
-static uint8_t
-e1000_calculate_mng_checksum(char *buffer, uint32_t length)
+static u8
+e1000_calculate_mng_checksum(char *buffer, u32 length)
 {
-    uint8_t sum = 0;
-    uint32_t i;
+    u8 sum = 0;
+    u32 i;
 
     if (!buffer)
         return 0;
@@ -7757,23 +7756,23 @@ e1000_calculate_mng_checksum(char *buffer, uint32_t length)
     for (i=0; i < length; i++)
         sum += buffer[i];
 
-    return (uint8_t) (0 - sum);
+    return (u8) (0 - sum);
 }
 
 /*****************************************************************************
  * This function checks whether tx pkt filtering needs to be enabled or not.
  *
- * returns  - TRUE for packet filtering or FALSE.
+ * returns  - true for packet filtering or false.
  ****************************************************************************/
-boolean_t
+bool
 e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
 {
     /* called in init as well as watchdog timer functions */
 
-    int32_t ret_val, checksum;
-    boolean_t tx_filter = FALSE;
+    s32 ret_val, checksum;
+    bool tx_filter = false;
     struct e1000_host_mng_dhcp_cookie *hdr = &(hw->mng_cookie);
-    uint8_t *buffer = (uint8_t *) &(hw->mng_cookie);
+    u8 *buffer = (u8 *) &(hw->mng_cookie);
 
     if (e1000_check_mng_mode(hw)) {
         ret_val = e1000_mng_enable_host_if(hw);
@@ -7787,11 +7786,11 @@ e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
                                                E1000_MNG_DHCP_COOKIE_LENGTH)) {
                     if (hdr->status &
                         E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT)
-                        tx_filter = TRUE;
+                        tx_filter = true;
                 } else
-                    tx_filter = TRUE;
+                    tx_filter = true;
             } else
-                tx_filter = TRUE;
+                tx_filter = true;
         }
     }
 
@@ -7804,41 +7803,41 @@ e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
  *
  * hw - Struct containing variables accessed by shared code
  *
- * returns: - TRUE/FALSE
+ * returns: - true/false
  *
  *****************************************************************************/
-uint32_t
+u32
 e1000_enable_mng_pass_thru(struct e1000_hw *hw)
 {
-    uint32_t manc;
-    uint32_t fwsm, factps;
+    u32 manc;
+    u32 fwsm, factps;
 
     if (hw->asf_firmware_present) {
         manc = E1000_READ_REG(hw, MANC);
 
         if (!(manc & E1000_MANC_RCV_TCO_EN) ||
             !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
-            return FALSE;
-        if (e1000_arc_subsystem_valid(hw) == TRUE) {
+            return false;
+        if (e1000_arc_subsystem_valid(hw)) {
             fwsm = E1000_READ_REG(hw, FWSM);
             factps = E1000_READ_REG(hw, FACTPS);
 
             if ((((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT) ==
                    e1000_mng_mode_pt) && !(factps & E1000_FACTPS_MNGCG))
-                return TRUE;
+                return true;
         } else
             if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN))
-                return TRUE;
+                return true;
     }
-    return FALSE;
+    return false;
 }
 
-static int32_t
+static s32
 e1000_polarity_reversal_workaround(struct e1000_hw *hw)
 {
-    int32_t ret_val;
-    uint16_t mii_status_reg;
-    uint16_t i;
+    s32 ret_val;
+    u16 mii_status_reg;
+    u16 i;
 
     /* Polarity reversal workaround for forced 10F/10H links. */
 
@@ -7930,7 +7929,7 @@ e1000_polarity_reversal_workaround(struct e1000_hw *hw)
 static void
 e1000_set_pci_express_master_disable(struct e1000_hw *hw)
 {
-    uint32_t ctrl;
+    u32 ctrl;
 
     DEBUGFUNC("e1000_set_pci_express_master_disable");
 
@@ -7953,10 +7952,10 @@ e1000_set_pci_express_master_disable(struct e1000_hw *hw)
  *            E1000_SUCCESS master requests disabled.
  *
  ******************************************************************************/
-int32_t
+s32
 e1000_disable_pciex_master(struct e1000_hw *hw)
 {
-    int32_t timeout = MASTER_DISABLE_TIMEOUT;   /* 80ms */
+    s32 timeout = MASTER_DISABLE_TIMEOUT;   /* 80ms */
 
     DEBUGFUNC("e1000_disable_pciex_master");
 
@@ -7991,10 +7990,10 @@ e1000_disable_pciex_master(struct e1000_hw *hw)
  *            E1000_SUCCESS at any other case.
  *
  ******************************************************************************/
-static int32_t
+static s32
 e1000_get_auto_rd_done(struct e1000_hw *hw)
 {
-    int32_t timeout = AUTO_READ_DONE_TIMEOUT;
+    s32 timeout = AUTO_READ_DONE_TIMEOUT;
 
     DEBUGFUNC("e1000_get_auto_rd_done");
 
@@ -8039,11 +8038,11 @@ e1000_get_auto_rd_done(struct e1000_hw *hw)
  *            E1000_SUCCESS at any other case.
  *
  ***************************************************************************/
-static int32_t
+static s32
 e1000_get_phy_cfg_done(struct e1000_hw *hw)
 {
-    int32_t timeout = PHY_CFG_TIMEOUT;
-    uint32_t cfg_mask = E1000_EEPROM_CFG_DONE;
+    s32 timeout = PHY_CFG_TIMEOUT;
+    u32 cfg_mask = E1000_EEPROM_CFG_DONE;
 
     DEBUGFUNC("e1000_get_phy_cfg_done");
 
@@ -8086,11 +8085,11 @@ e1000_get_phy_cfg_done(struct e1000_hw *hw)
  *            E1000_SUCCESS at any other case.
  *
  ***************************************************************************/
-static int32_t
+static s32
 e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw)
 {
-    int32_t timeout;
-    uint32_t swsm;
+    s32 timeout;
+    u32 swsm;
 
     DEBUGFUNC("e1000_get_hw_eeprom_semaphore");
 
@@ -8139,7 +8138,7 @@ e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw)
 static void
 e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
 {
-    uint32_t swsm;
+    u32 swsm;
 
     DEBUGFUNC("e1000_put_hw_eeprom_semaphore");
 
@@ -8165,11 +8164,11 @@ e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
  *            E1000_SUCCESS at any other case.
  *
  ***************************************************************************/
-static int32_t
+static s32
 e1000_get_software_semaphore(struct e1000_hw *hw)
 {
-    int32_t timeout = hw->eeprom.word_size + 1;
-    uint32_t swsm;
+    s32 timeout = hw->eeprom.word_size + 1;
+    u32 swsm;
 
     DEBUGFUNC("e1000_get_software_semaphore");
 
@@ -8204,7 +8203,7 @@ e1000_get_software_semaphore(struct e1000_hw *hw)
 static void
 e1000_release_software_semaphore(struct e1000_hw *hw)
 {
-    uint32_t swsm;
+    u32 swsm;
 
     DEBUGFUNC("e1000_release_software_semaphore");
 
@@ -8229,11 +8228,11 @@ e1000_release_software_semaphore(struct e1000_hw *hw)
  *            E1000_SUCCESS
  *
  *****************************************************************************/
-int32_t
+s32
 e1000_check_phy_reset_block(struct e1000_hw *hw)
 {
-    uint32_t manc = 0;
-    uint32_t fwsm = 0;
+    u32 manc = 0;
+    u32 fwsm = 0;
 
     if (hw->mac_type == e1000_ich8lan) {
         fwsm = E1000_READ_REG(hw, FWSM);
@@ -8247,10 +8246,10 @@ e1000_check_phy_reset_block(struct e1000_hw *hw)
         E1000_BLK_PHY_RESET : E1000_SUCCESS;
 }
 
-static uint8_t
+static u8
 e1000_arc_subsystem_valid(struct e1000_hw *hw)
 {
-    uint32_t fwsm;
+    u32 fwsm;
 
     /* On 8257x silicon, registers in the range of 0x8800 - 0x8FFC
      * may not be provided a DMA clock when no manageability features are
@@ -8264,14 +8263,14 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw)
     case e1000_80003es2lan:
         fwsm = E1000_READ_REG(hw, FWSM);
         if ((fwsm & E1000_FWSM_MODE_MASK) != 0)
-            return TRUE;
+            return true;
         break;
     case e1000_ich8lan:
-        return TRUE;
+        return true;
     default:
         break;
     }
-    return FALSE;
+    return false;
 }
 
 
@@ -8284,10 +8283,10 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw)
  * returns: E1000_SUCCESS
  *
  *****************************************************************************/
-static int32_t
-e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop)
+static s32
+e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop)
 {
-    uint32_t gcr_reg = 0;
+    u32 gcr_reg = 0;
 
     DEBUGFUNC("e1000_set_pci_ex_no_snoop");
 
@@ -8304,7 +8303,7 @@ e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop)
         E1000_WRITE_REG(hw, GCR, gcr_reg);
     }
     if (hw->mac_type == e1000_ich8lan) {
-        uint32_t ctrl_ext;
+        u32 ctrl_ext;
 
         E1000_WRITE_REG(hw, GCR, PCI_EX_82566_SNOOP_ALL);
 
@@ -8325,11 +8324,11 @@ e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop)
  * hw: Struct containing variables accessed by shared code
  *
  ***************************************************************************/
-static int32_t
+static s32
 e1000_get_software_flag(struct e1000_hw *hw)
 {
-    int32_t timeout = PHY_CFG_TIMEOUT;
-    uint32_t extcnf_ctrl;
+    s32 timeout = PHY_CFG_TIMEOUT;
+    u32 extcnf_ctrl;
 
     DEBUGFUNC("e1000_get_software_flag");
 
@@ -8367,7 +8366,7 @@ e1000_get_software_flag(struct e1000_hw *hw)
 static void
 e1000_release_software_flag(struct e1000_hw *hw)
 {
-    uint32_t extcnf_ctrl;
+    u32 extcnf_ctrl;
 
     DEBUGFUNC("e1000_release_software_flag");
 
@@ -8389,16 +8388,16 @@ e1000_release_software_flag(struct e1000_hw *hw)
  * data - word read from the EEPROM
  * words - number of words to read
  *****************************************************************************/
-static int32_t
-e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
-                       uint16_t *data)
+static s32
+e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
+                       u16 *data)
 {
-    int32_t  error = E1000_SUCCESS;
-    uint32_t flash_bank = 0;
-    uint32_t act_offset = 0;
-    uint32_t bank_offset = 0;
-    uint16_t word = 0;
-    uint16_t i = 0;
+    s32  error = E1000_SUCCESS;
+    u32 flash_bank = 0;
+    u32 act_offset = 0;
+    u32 bank_offset = 0;
+    u16 word = 0;
+    u16 i = 0;
 
     /* We need to know which is the valid flash bank.  In the event
      * that we didn't allocate eeprom_shadow_ram, we may not be
@@ -8417,7 +8416,7 @@ e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
 
     for (i = 0; i < words; i++) {
         if (hw->eeprom_shadow_ram != NULL &&
-            hw->eeprom_shadow_ram[offset+i].modified == TRUE) {
+            hw->eeprom_shadow_ram[offset+i].modified) {
             data[i] = hw->eeprom_shadow_ram[offset+i].eeprom_word;
         } else {
             /* The NVM part needs a byte offset, hence * 2 */
@@ -8445,12 +8444,12 @@ e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
  * words - number of words to write
  * data - words to write to the EEPROM
  *****************************************************************************/
-static int32_t
-e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
-                        uint16_t *data)
+static s32
+e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
+                        u16 *data)
 {
-    uint32_t i = 0;
-    int32_t error = E1000_SUCCESS;
+    u32 i = 0;
+    s32 error = E1000_SUCCESS;
 
     error = e1000_get_software_flag(hw);
     if (error != E1000_SUCCESS)
@@ -8466,7 +8465,7 @@ e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
     if (hw->eeprom_shadow_ram != NULL) {
         for (i = 0; i < words; i++) {
             if ((offset + i) < E1000_SHADOW_RAM_WORDS) {
-                hw->eeprom_shadow_ram[offset+i].modified = TRUE;
+                hw->eeprom_shadow_ram[offset+i].modified = true;
                 hw->eeprom_shadow_ram[offset+i].eeprom_word = data[i];
             } else {
                 error = -E1000_ERR_EEPROM;
@@ -8492,12 +8491,12 @@ e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
  *
  * hw - The pointer to the hw structure
  ****************************************************************************/
-static int32_t
+static s32
 e1000_ich8_cycle_init(struct e1000_hw *hw)
 {
     union ich8_hws_flash_status hsfsts;
-    int32_t error = E1000_ERR_EEPROM;
-    int32_t i     = 0;
+    s32 error = E1000_ERR_EEPROM;
+    s32 i     = 0;
 
     DEBUGFUNC("e1000_ich8_cycle_init");
 
@@ -8559,13 +8558,13 @@ e1000_ich8_cycle_init(struct e1000_hw *hw)
  *
  * hw - The pointer to the hw structure
  ****************************************************************************/
-static int32_t
-e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout)
+static s32
+e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout)
 {
     union ich8_hws_flash_ctrl hsflctl;
     union ich8_hws_flash_status hsfsts;
-    int32_t error = E1000_ERR_EEPROM;
-    uint32_t i = 0;
+    s32 error = E1000_ERR_EEPROM;
+    u32 i = 0;
 
     /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
     hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
@@ -8594,16 +8593,16 @@ e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout)
  * size - Size of data to read, 1=byte 2=word
  * data - Pointer to the word to store the value read.
  *****************************************************************************/
-static int32_t
-e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
-                     uint32_t size, uint16_t* data)
+static s32
+e1000_read_ich8_data(struct e1000_hw *hw, u32 index,
+                     u32 size, u16* data)
 {
     union ich8_hws_flash_status hsfsts;
     union ich8_hws_flash_ctrl hsflctl;
-    uint32_t flash_linear_address;
-    uint32_t flash_data = 0;
-    int32_t error = -E1000_ERR_EEPROM;
-    int32_t count = 0;
+    u32 flash_linear_address;
+    u32 flash_data = 0;
+    s32 error = -E1000_ERR_EEPROM;
+    s32 count = 0;
 
     DEBUGFUNC("e1000_read_ich8_data");
 
@@ -8641,9 +8640,9 @@ e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
         if (error == E1000_SUCCESS) {
             flash_data = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0);
             if (size == 1) {
-                *data = (uint8_t)(flash_data & 0x000000FF);
+                *data = (u8)(flash_data & 0x000000FF);
             } else if (size == 2) {
-                *data = (uint16_t)(flash_data & 0x0000FFFF);
+                *data = (u16)(flash_data & 0x0000FFFF);
             }
             break;
         } else {
@@ -8673,16 +8672,16 @@ e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
  * size - Size of data to read, 1=byte 2=word
  * data - The byte(s) to write to the NVM.
  *****************************************************************************/
-static int32_t
-e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size,
-                      uint16_t data)
+static s32
+e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
+                      u16 data)
 {
     union ich8_hws_flash_status hsfsts;
     union ich8_hws_flash_ctrl hsflctl;
-    uint32_t flash_linear_address;
-    uint32_t flash_data = 0;
-    int32_t error = -E1000_ERR_EEPROM;
-    int32_t count = 0;
+    u32 flash_linear_address;
+    u32 flash_data = 0;
+    s32 error = -E1000_ERR_EEPROM;
+    s32 count = 0;
 
     DEBUGFUNC("e1000_write_ich8_data");
 
@@ -8711,9 +8710,9 @@ e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size,
         E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
 
         if (size == 1)
-            flash_data = (uint32_t)data & 0x00FF;
+            flash_data = (u32)data & 0x00FF;
         else
-            flash_data = (uint32_t)data;
+            flash_data = (u32)data;
 
         E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
 
@@ -8748,15 +8747,15 @@ e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size,
  * index - The index of the byte to read.
  * data - Pointer to a byte to store the value read.
  *****************************************************************************/
-static int32_t
-e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t* data)
+static s32
+e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8* data)
 {
-    int32_t status = E1000_SUCCESS;
-    uint16_t word = 0;
+    s32 status = E1000_SUCCESS;
+    u16 word = 0;
 
     status = e1000_read_ich8_data(hw, index, 1, &word);
     if (status == E1000_SUCCESS) {
-        *data = (uint8_t)word;
+        *data = (u8)word;
     }
 
     return status;
@@ -8771,11 +8770,11 @@ e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t* data)
  * index - The index of the byte to write.
  * byte - The byte to write to the NVM.
  *****************************************************************************/
-static int32_t
-e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte)
+static s32
+e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte)
 {
-    int32_t error = E1000_SUCCESS;
-    int32_t program_retries = 0;
+    s32 error = E1000_SUCCESS;
+    s32 program_retries = 0;
 
     DEBUGOUT2("Byte := %2.2X Offset := %d\n", byte, index);
 
@@ -8804,11 +8803,11 @@ e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte)
  * index - The index of the byte to read.
  * data - The byte to write to the NVM.
  *****************************************************************************/
-static int32_t
-e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data)
+static s32
+e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 data)
 {
-    int32_t status = E1000_SUCCESS;
-    uint16_t word = (uint16_t)data;
+    s32 status = E1000_SUCCESS;
+    u16 word = (u16)data;
 
     status = e1000_write_ich8_data(hw, index, 1, word);
 
@@ -8822,10 +8821,10 @@ e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data)
  * index - The starting byte index of the word to read.
  * data - Pointer to a word to store the value read.
  *****************************************************************************/
-static int32_t
-e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data)
+static s32
+e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data)
 {
-    int32_t status = E1000_SUCCESS;
+    s32 status = E1000_SUCCESS;
     status = e1000_read_ich8_data(hw, index, 2, data);
     return status;
 }
@@ -8841,19 +8840,19 @@ e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data)
  * amount of NVM used in each bank is a *minimum* of 4 KBytes, but in fact the
  * bank size may be 4, 8 or 64 KBytes
  *****************************************************************************/
-static int32_t
-e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t bank)
+static s32
+e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank)
 {
     union ich8_hws_flash_status hsfsts;
     union ich8_hws_flash_ctrl hsflctl;
-    uint32_t flash_linear_address;
-    int32_t  count = 0;
-    int32_t  error = E1000_ERR_EEPROM;
-    int32_t  iteration;
-    int32_t  sub_sector_size = 0;
-    int32_t  bank_size;
-    int32_t  j = 0;
-    int32_t  error_flag = 0;
+    u32 flash_linear_address;
+    s32  count = 0;
+    s32  error = E1000_ERR_EEPROM;
+    s32  iteration;
+    s32  sub_sector_size = 0;
+    s32  bank_size;
+    s32  j = 0;
+    s32  error_flag = 0;
 
     hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
 
@@ -8931,16 +8930,16 @@ e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t bank)
     return error;
 }
 
-static int32_t
+static s32
 e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
-                                      uint32_t cnf_base_addr, uint32_t cnf_size)
+                                      u32 cnf_base_addr, u32 cnf_size)
 {
-    uint32_t ret_val = E1000_SUCCESS;
-    uint16_t word_addr, reg_data, reg_addr;
-    uint16_t i;
+    u32 ret_val = E1000_SUCCESS;
+    u16 word_addr, reg_data, reg_addr;
+    u16 i;
 
     /* cnf_base_addr is in DWORD */
-    word_addr = (uint16_t)(cnf_base_addr << 1);
+    word_addr = (u16)(cnf_base_addr << 1);
 
     /* cnf_size is returned in size of dwords */
     for (i = 0; i < cnf_size; i++) {
@@ -8956,7 +8955,7 @@ e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
         if (ret_val != E1000_SUCCESS)
             return ret_val;
 
-        ret_val = e1000_write_phy_reg_ex(hw, (uint32_t)reg_addr, reg_data);
+        ret_val = e1000_write_phy_reg_ex(hw, (u32)reg_addr, reg_data);
 
         e1000_release_software_flag(hw);
     }
@@ -8973,10 +8972,10 @@ e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
  *
  * hw: Struct containing variables accessed by shared code
  *****************************************************************************/
-static int32_t
+static s32
 e1000_init_lcd_from_nvm(struct e1000_hw *hw)
 {
-    uint32_t reg_data, cnf_base_addr, cnf_size, ret_val, loop;
+    u32 reg_data, cnf_base_addr, cnf_size, ret_val, loop;
 
     if (hw->phy_type != e1000_phy_igp_3)
           return E1000_SUCCESS;
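
An illustrative aside on the pattern in the e1000_hw.c hunks above (not part of the commit's diff): the conversion swaps the driver-local uint8_t/uint16_t/uint32_t/uint64_t/int32_t and boolean_t/TRUE/FALSE spellings for the kernel's u8/u16/u32/u64/s32 and bool/true/false. Because each replacement has the same width and signedness, register accesses, EEPROM word handling and checksum arithmetic are unaffected; only the spelling changes. The standalone C sketch below makes that mapping explicit; the typedefs merely stand in for the kernel's <linux/types.h> definitions so the snippet builds in userspace, and none of it is e1000 code.

/*
 * Userspace stand-ins for the kernel types adopted in this diff.
 * In the kernel these come from <linux/types.h>; the fixed widths are
 * the only property the e1000 conversion relies on.
 */
#include <stdint.h>
#include <stdbool.h>

typedef uint8_t  u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;
typedef int32_t  s32;

_Static_assert(sizeof(u8)  == 1, "u8 is one byte");
_Static_assert(sizeof(u16) == 2, "u16 is two bytes");
_Static_assert(sizeof(u32) == 4, "u32 is four bytes");
_Static_assert(sizeof(u64) == 8, "u64 is eight bytes");
_Static_assert(sizeof(s32) == 4, "s32 is a signed 32-bit value");

int main(void)
{
	/* boolean_t/TRUE/FALSE become C99 bool/true/false, as in the
	 * hw->in_ifs_mode assignments near the top of this diff. */
	bool in_ifs_mode = false;

	return in_ifs_mode ? 1 : 0;
}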

+ 286 - 286
drivers/net/e1000/e1000_hw.h

@@ -100,8 +100,8 @@ typedef enum {
 } e1000_fc_type;
 
 struct e1000_shadow_ram {
-    uint16_t    eeprom_word;
-    boolean_t   modified;
+    u16 eeprom_word;
+    bool modified;
 };
 
 /* PCI bus types */
@@ -263,19 +263,19 @@ struct e1000_phy_info {
 };
 
 struct e1000_phy_stats {
-    uint32_t idle_errors;
-    uint32_t receive_errors;
+    u32 idle_errors;
+    u32 receive_errors;
 };
 
 struct e1000_eeprom_info {
     e1000_eeprom_type type;
-    uint16_t word_size;
-    uint16_t opcode_bits;
-    uint16_t address_bits;
-    uint16_t delay_usec;
-    uint16_t page_size;
-    boolean_t use_eerd;
-    boolean_t use_eewr;
+    u16 word_size;
+    u16 opcode_bits;
+    u16 address_bits;
+    u16 delay_usec;
+    u16 page_size;
+    bool use_eerd;
+    bool use_eewr;
 };
 
 /* Flex ASF Information */
@@ -308,34 +308,34 @@ typedef enum {
 
 /* Function prototypes */
 /* Initialization */
-int32_t e1000_reset_hw(struct e1000_hw *hw);
-int32_t e1000_init_hw(struct e1000_hw *hw);
-int32_t e1000_set_mac_type(struct e1000_hw *hw);
+s32 e1000_reset_hw(struct e1000_hw *hw);
+s32 e1000_init_hw(struct e1000_hw *hw);
+s32 e1000_set_mac_type(struct e1000_hw *hw);
 void e1000_set_media_type(struct e1000_hw *hw);
 
 /* Link Configuration */
-int32_t e1000_setup_link(struct e1000_hw *hw);
-int32_t e1000_phy_setup_autoneg(struct e1000_hw *hw);
+s32 e1000_setup_link(struct e1000_hw *hw);
+s32 e1000_phy_setup_autoneg(struct e1000_hw *hw);
 void e1000_config_collision_dist(struct e1000_hw *hw);
-int32_t e1000_check_for_link(struct e1000_hw *hw);
-int32_t e1000_get_speed_and_duplex(struct e1000_hw *hw, uint16_t *speed, uint16_t *duplex);
-int32_t e1000_force_mac_fc(struct e1000_hw *hw);
+s32 e1000_check_for_link(struct e1000_hw *hw);
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex);
+s32 e1000_force_mac_fc(struct e1000_hw *hw);
 
 /* PHY */
-int32_t e1000_read_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *phy_data);
-int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data);
-int32_t e1000_phy_hw_reset(struct e1000_hw *hw);
-int32_t e1000_phy_reset(struct e1000_hw *hw);
-int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
-int32_t e1000_validate_mdi_setting(struct e1000_hw *hw);
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data);
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 data);
+s32 e1000_phy_hw_reset(struct e1000_hw *hw);
+s32 e1000_phy_reset(struct e1000_hw *hw);
+s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
+s32 e1000_validate_mdi_setting(struct e1000_hw *hw);
 
 void e1000_phy_powerdown_workaround(struct e1000_hw *hw);
 
 /* EEPROM Functions */
-int32_t e1000_init_eeprom_params(struct e1000_hw *hw);
+s32 e1000_init_eeprom_params(struct e1000_hw *hw);
 
 /* MNG HOST IF functions */
-uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw);
+u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw);
 
 #define E1000_MNG_DHCP_TX_PAYLOAD_CMD   64
 #define E1000_HI_MAX_MNG_DATA_LENGTH    0x6F8   /* Host Interface data length */
@@ -354,80 +354,80 @@ uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw);
 #define E1000_VFTA_ENTRY_BIT_SHIFT_MASK              0x1F
 
 struct e1000_host_mng_command_header {
-    uint8_t command_id;
-    uint8_t checksum;
-    uint16_t reserved1;
-    uint16_t reserved2;
-    uint16_t command_length;
+    u8 command_id;
+    u8 checksum;
+    u16 reserved1;
+    u16 reserved2;
+    u16 command_length;
 };
 
 struct e1000_host_mng_command_info {
     struct e1000_host_mng_command_header command_header;  /* Command Head/Command Result Head has 4 bytes */
-    uint8_t command_data[E1000_HI_MAX_MNG_DATA_LENGTH];   /* Command data can length 0..0x658*/
+    u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];   /* Command data can length 0..0x658*/
 };
 #ifdef __BIG_ENDIAN
 struct e1000_host_mng_dhcp_cookie{
-    uint32_t signature;
-    uint16_t vlan_id;
-    uint8_t reserved0;
-    uint8_t status;
-    uint32_t reserved1;
-    uint8_t checksum;
-    uint8_t reserved3;
-    uint16_t reserved2;
+    u32 signature;
+    u16 vlan_id;
+    u8 reserved0;
+    u8 status;
+    u32 reserved1;
+    u8 checksum;
+    u8 reserved3;
+    u16 reserved2;
 };
 #else
 struct e1000_host_mng_dhcp_cookie{
-    uint32_t signature;
-    uint8_t status;
-    uint8_t reserved0;
-    uint16_t vlan_id;
-    uint32_t reserved1;
-    uint16_t reserved2;
-    uint8_t reserved3;
-    uint8_t checksum;
+    u32 signature;
+    u8 status;
+    u8 reserved0;
+    u16 vlan_id;
+    u32 reserved1;
+    u16 reserved2;
+    u8 reserved3;
+    u8 checksum;
 };
 #endif
 
-int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer,
-                                  uint16_t length);
-boolean_t e1000_check_mng_mode(struct e1000_hw *hw);
-boolean_t e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
-int32_t e1000_read_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data);
-int32_t e1000_validate_eeprom_checksum(struct e1000_hw *hw);
-int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw);
-int32_t e1000_write_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data);
-int32_t e1000_read_mac_addr(struct e1000_hw * hw);
+s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer,
+                                  u16 length);
+bool e1000_check_mng_mode(struct e1000_hw *hw);
+bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
+s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 *data);
+s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw);
+s32 e1000_update_eeprom_checksum(struct e1000_hw *hw);
+s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 *data);
+s32 e1000_read_mac_addr(struct e1000_hw * hw);
 
 /* Filters (multicast, vlan, receive) */
-uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr);
-void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value);
-void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index);
-void e1000_write_vfta(struct e1000_hw *hw, uint32_t offset, uint32_t value);
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr);
+void e1000_mta_set(struct e1000_hw *hw, u32 hash_value);
+void e1000_rar_set(struct e1000_hw *hw, u8 * mc_addr, u32 rar_index);
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
 
 /* LED functions */
-int32_t e1000_setup_led(struct e1000_hw *hw);
-int32_t e1000_cleanup_led(struct e1000_hw *hw);
-int32_t e1000_led_on(struct e1000_hw *hw);
-int32_t e1000_led_off(struct e1000_hw *hw);
-int32_t e1000_blink_led_start(struct e1000_hw *hw);
+s32 e1000_setup_led(struct e1000_hw *hw);
+s32 e1000_cleanup_led(struct e1000_hw *hw);
+s32 e1000_led_on(struct e1000_hw *hw);
+s32 e1000_led_off(struct e1000_hw *hw);
+s32 e1000_blink_led_start(struct e1000_hw *hw);
 
 /* Adaptive IFS Functions */
 
 /* Everything else */
 void e1000_reset_adaptive(struct e1000_hw *hw);
 void e1000_update_adaptive(struct e1000_hw *hw);
-void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, uint32_t frame_len, uint8_t * mac_addr);
+void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, u32 frame_len, u8 * mac_addr);
 void e1000_get_bus_info(struct e1000_hw *hw);
 void e1000_pci_set_mwi(struct e1000_hw *hw);
 void e1000_pci_clear_mwi(struct e1000_hw *hw);
-int32_t e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value);
+s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc);
 int e1000_pcix_get_mmrbc(struct e1000_hw *hw);
 /* Port I/O is only supported on 82544 and newer */
-void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value);
-int32_t e1000_disable_pciex_master(struct e1000_hw *hw);
-int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
+void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value);
+s32 e1000_disable_pciex_master(struct e1000_hw *hw);
+s32 e1000_check_phy_reset_block(struct e1000_hw *hw);
 
 
 #define E1000_READ_REG_IO(a, reg) \
@@ -596,8 +596,8 @@ struct e1000_rx_desc {
     __le64 buffer_addr; /* Address of the descriptor's data buffer */
     __le16 length;     /* Length of data DMAed into data buffer */
     __le16 csum;       /* Packet checksum */
-    uint8_t status;      /* Descriptor status */
-    uint8_t errors;      /* Descriptor Errors */
+    u8 status;      /* Descriptor status */
+    u8 errors;      /* Descriptor Errors */
     __le16 special;
 };
 
@@ -718,15 +718,15 @@ struct e1000_tx_desc {
         __le32 data;
         struct {
             __le16 length;    /* Data buffer length */
-            uint8_t cso;        /* Checksum offset */
-            uint8_t cmd;        /* Descriptor control */
+            u8 cso;        /* Checksum offset */
+            u8 cmd;        /* Descriptor control */
         } flags;
     } lower;
     union {
         __le32 data;
         struct {
-            uint8_t status;     /* Descriptor status */
-            uint8_t css;        /* Checksum start */
+            u8 status;     /* Descriptor status */
+            u8 css;        /* Checksum start */
             __le16 special;
         } fields;
     } upper;
@@ -759,16 +759,16 @@ struct e1000_context_desc {
     union {
         __le32 ip_config;
         struct {
-            uint8_t ipcss;      /* IP checksum start */
-            uint8_t ipcso;      /* IP checksum offset */
+            u8 ipcss;      /* IP checksum start */
+            u8 ipcso;      /* IP checksum offset */
             __le16 ipcse;     /* IP checksum end */
         } ip_fields;
     } lower_setup;
     union {
         __le32 tcp_config;
         struct {
-            uint8_t tucss;      /* TCP checksum start */
-            uint8_t tucso;      /* TCP checksum offset */
+            u8 tucss;      /* TCP checksum start */
+            u8 tucso;      /* TCP checksum offset */
             __le16 tucse;     /* TCP checksum end */
         } tcp_fields;
     } upper_setup;
@@ -776,8 +776,8 @@ struct e1000_context_desc {
     union {
         __le32 data;
         struct {
-            uint8_t status;     /* Descriptor status */
-            uint8_t hdr_len;    /* Header length */
+            u8 status;     /* Descriptor status */
+            u8 hdr_len;    /* Header length */
             __le16 mss;       /* Maximum segment size */
         } fields;
     } tcp_seg_setup;
@@ -790,15 +790,15 @@ struct e1000_data_desc {
         __le32 data;
         struct {
             __le16 length;    /* Data buffer length */
-            uint8_t typ_len_ext;        /* */
-            uint8_t cmd;        /* */
+            u8 typ_len_ext;        /* */
+            u8 cmd;        /* */
         } flags;
     } lower;
     union {
         __le32 data;
         struct {
-            uint8_t status;     /* Descriptor status */
-            uint8_t popts;      /* Packet Options */
+            u8 status;     /* Descriptor status */
+            u8 popts;      /* Packet Options */
             __le16 special;   /* */
         } fields;
     } upper;
@@ -825,8 +825,8 @@ struct e1000_rar {
 
 /* IPv4 Address Table Entry */
 struct e1000_ipv4_at_entry {
-    volatile uint32_t ipv4_addr;        /* IP Address (RW) */
-    volatile uint32_t reserved;
+    volatile u32 ipv4_addr;        /* IP Address (RW) */
+    volatile u32 reserved;
 };
 
 /* Four wakeup IP addresses are supported */
@@ -837,25 +837,25 @@ struct e1000_ipv4_at_entry {
 
 /* IPv6 Address Table Entry */
 struct e1000_ipv6_at_entry {
-    volatile uint8_t ipv6_addr[16];
+    volatile u8 ipv6_addr[16];
 };
 
 /* Flexible Filter Length Table Entry */
 struct e1000_fflt_entry {
-    volatile uint32_t length;   /* Flexible Filter Length (RW) */
-    volatile uint32_t reserved;
+    volatile u32 length;   /* Flexible Filter Length (RW) */
+    volatile u32 reserved;
 };
 
 /* Flexible Filter Mask Table Entry */
 struct e1000_ffmt_entry {
-    volatile uint32_t mask;     /* Flexible Filter Mask (RW) */
-    volatile uint32_t reserved;
+    volatile u32 mask;     /* Flexible Filter Mask (RW) */
+    volatile u32 reserved;
 };
 
 /* Flexible Filter Value Table Entry */
 struct e1000_ffvt_entry {
-    volatile uint32_t value;    /* Flexible Filter Value (RW) */
-    volatile uint32_t reserved;
+    volatile u32 value;    /* Flexible Filter Value (RW) */
+    volatile u32 reserved;
 };
 
 /* Four Flexible Filters are supported */
@@ -1309,89 +1309,89 @@ struct e1000_ffvt_entry {
 
 /* Statistics counters collected by the MAC */
 struct e1000_hw_stats {
-	uint64_t		crcerrs;
-	uint64_t		algnerrc;
-	uint64_t		symerrs;
-	uint64_t		rxerrc;
-	uint64_t		txerrc;
-	uint64_t		mpc;
-	uint64_t		scc;
-	uint64_t		ecol;
-	uint64_t		mcc;
-	uint64_t		latecol;
-	uint64_t		colc;
-	uint64_t		dc;
-	uint64_t		tncrs;
-	uint64_t		sec;
-	uint64_t		cexterr;
-	uint64_t		rlec;
-	uint64_t		xonrxc;
-	uint64_t		xontxc;
-	uint64_t		xoffrxc;
-	uint64_t		xofftxc;
-	uint64_t		fcruc;
-	uint64_t		prc64;
-	uint64_t		prc127;
-	uint64_t		prc255;
-	uint64_t		prc511;
-	uint64_t		prc1023;
-	uint64_t		prc1522;
-	uint64_t		gprc;
-	uint64_t		bprc;
-	uint64_t		mprc;
-	uint64_t		gptc;
-	uint64_t		gorcl;
-	uint64_t		gorch;
-	uint64_t		gotcl;
-	uint64_t		gotch;
-	uint64_t		rnbc;
-	uint64_t		ruc;
-	uint64_t		rfc;
-	uint64_t		roc;
-	uint64_t		rlerrc;
-	uint64_t		rjc;
-	uint64_t		mgprc;
-	uint64_t		mgpdc;
-	uint64_t		mgptc;
-	uint64_t		torl;
-	uint64_t		torh;
-	uint64_t		totl;
-	uint64_t		toth;
-	uint64_t		tpr;
-	uint64_t		tpt;
-	uint64_t		ptc64;
-	uint64_t		ptc127;
-	uint64_t		ptc255;
-	uint64_t		ptc511;
-	uint64_t		ptc1023;
-	uint64_t		ptc1522;
-	uint64_t		mptc;
-	uint64_t		bptc;
-	uint64_t		tsctc;
-	uint64_t		tsctfc;
-	uint64_t		iac;
-	uint64_t		icrxptc;
-	uint64_t		icrxatc;
-	uint64_t		ictxptc;
-	uint64_t		ictxatc;
-	uint64_t		ictxqec;
-	uint64_t		ictxqmtc;
-	uint64_t		icrxdmtc;
-	uint64_t		icrxoc;
+	u64		crcerrs;
+	u64		algnerrc;
+	u64		symerrs;
+	u64		rxerrc;
+	u64		txerrc;
+	u64		mpc;
+	u64		scc;
+	u64		ecol;
+	u64		mcc;
+	u64		latecol;
+	u64		colc;
+	u64		dc;
+	u64		tncrs;
+	u64		sec;
+	u64		cexterr;
+	u64		rlec;
+	u64		xonrxc;
+	u64		xontxc;
+	u64		xoffrxc;
+	u64		xofftxc;
+	u64		fcruc;
+	u64		prc64;
+	u64		prc127;
+	u64		prc255;
+	u64		prc511;
+	u64		prc1023;
+	u64		prc1522;
+	u64		gprc;
+	u64		bprc;
+	u64		mprc;
+	u64		gptc;
+	u64		gorcl;
+	u64		gorch;
+	u64		gotcl;
+	u64		gotch;
+	u64		rnbc;
+	u64		ruc;
+	u64		rfc;
+	u64		roc;
+	u64		rlerrc;
+	u64		rjc;
+	u64		mgprc;
+	u64		mgpdc;
+	u64		mgptc;
+	u64		torl;
+	u64		torh;
+	u64		totl;
+	u64		toth;
+	u64		tpr;
+	u64		tpt;
+	u64		ptc64;
+	u64		ptc127;
+	u64		ptc255;
+	u64		ptc511;
+	u64		ptc1023;
+	u64		ptc1522;
+	u64		mptc;
+	u64		bptc;
+	u64		tsctc;
+	u64		tsctfc;
+	u64		iac;
+	u64		icrxptc;
+	u64		icrxatc;
+	u64		ictxptc;
+	u64		ictxatc;
+	u64		ictxqec;
+	u64		ictxqmtc;
+	u64		icrxdmtc;
+	u64		icrxoc;
 };
 
 /* Structure containing variables used by the shared code (e1000_hw.c) */
 struct e1000_hw {
-	uint8_t __iomem		*hw_addr;
-	uint8_t __iomem		*flash_address;
+	u8 __iomem		*hw_addr;
+	u8 __iomem		*flash_address;
 	e1000_mac_type		mac_type;
 	e1000_phy_type		phy_type;
-	uint32_t		phy_init_script;
+	u32		phy_init_script;
 	e1000_media_type	media_type;
 	void			*back;
 	struct e1000_shadow_ram	*eeprom_shadow_ram;
-	uint32_t		flash_bank_size;
-	uint32_t		flash_base_addr;
+	u32		flash_bank_size;
+	u32		flash_base_addr;
 	e1000_fc_type		fc;
 	e1000_bus_speed		bus_speed;
 	e1000_bus_width		bus_width;
@@ -1400,75 +1400,75 @@ struct e1000_hw {
 	e1000_ms_type		master_slave;
 	e1000_ms_type		original_master_slave;
 	e1000_ffe_config	ffe_config_state;
-	uint32_t		asf_firmware_present;
-	uint32_t		eeprom_semaphore_present;
-	uint32_t		swfw_sync_present;
-	uint32_t		swfwhw_semaphore_present;
+	u32		asf_firmware_present;
+	u32		eeprom_semaphore_present;
+	u32		swfw_sync_present;
+	u32		swfwhw_semaphore_present;
 	unsigned long		io_base;
-	uint32_t		phy_id;
-	uint32_t		phy_revision;
-	uint32_t		phy_addr;
-	uint32_t		original_fc;
-	uint32_t		txcw;
-	uint32_t		autoneg_failed;
-	uint32_t		max_frame_size;
-	uint32_t		min_frame_size;
-	uint32_t		mc_filter_type;
-	uint32_t		num_mc_addrs;
-	uint32_t		collision_delta;
-	uint32_t		tx_packet_delta;
-	uint32_t		ledctl_default;
-	uint32_t		ledctl_mode1;
-	uint32_t		ledctl_mode2;
-	boolean_t		tx_pkt_filtering;
+	u32		phy_id;
+	u32		phy_revision;
+	u32		phy_addr;
+	u32		original_fc;
+	u32		txcw;
+	u32		autoneg_failed;
+	u32		max_frame_size;
+	u32		min_frame_size;
+	u32		mc_filter_type;
+	u32		num_mc_addrs;
+	u32		collision_delta;
+	u32		tx_packet_delta;
+	u32		ledctl_default;
+	u32		ledctl_mode1;
+	u32		ledctl_mode2;
+	bool			tx_pkt_filtering;
 	struct e1000_host_mng_dhcp_cookie mng_cookie;
-	uint16_t		phy_spd_default;
-	uint16_t		autoneg_advertised;
-	uint16_t		pci_cmd_word;
-	uint16_t		fc_high_water;
-	uint16_t		fc_low_water;
-	uint16_t		fc_pause_time;
-	uint16_t		current_ifs_val;
-	uint16_t		ifs_min_val;
-	uint16_t		ifs_max_val;
-	uint16_t		ifs_step_size;
-	uint16_t		ifs_ratio;
-	uint16_t		device_id;
-	uint16_t		vendor_id;
-	uint16_t		subsystem_id;
-	uint16_t		subsystem_vendor_id;
-	uint8_t			revision_id;
-	uint8_t			autoneg;
-	uint8_t			mdix;
-	uint8_t			forced_speed_duplex;
-	uint8_t			wait_autoneg_complete;
-	uint8_t			dma_fairness;
-	uint8_t			mac_addr[NODE_ADDRESS_SIZE];
-	uint8_t			perm_mac_addr[NODE_ADDRESS_SIZE];
-	boolean_t		disable_polarity_correction;
-	boolean_t		speed_downgraded;
+	u16		phy_spd_default;
+	u16		autoneg_advertised;
+	u16		pci_cmd_word;
+	u16		fc_high_water;
+	u16		fc_low_water;
+	u16		fc_pause_time;
+	u16		current_ifs_val;
+	u16		ifs_min_val;
+	u16		ifs_max_val;
+	u16		ifs_step_size;
+	u16		ifs_ratio;
+	u16		device_id;
+	u16		vendor_id;
+	u16		subsystem_id;
+	u16		subsystem_vendor_id;
+	u8			revision_id;
+	u8			autoneg;
+	u8			mdix;
+	u8			forced_speed_duplex;
+	u8			wait_autoneg_complete;
+	u8			dma_fairness;
+	u8			mac_addr[NODE_ADDRESS_SIZE];
+	u8			perm_mac_addr[NODE_ADDRESS_SIZE];
+	bool			disable_polarity_correction;
+	bool			speed_downgraded;
 	e1000_smart_speed	smart_speed;
 	e1000_dsp_config	dsp_config_state;
-	boolean_t		get_link_status;
-	boolean_t		serdes_link_down;
-	boolean_t		tbi_compatibility_en;
-	boolean_t		tbi_compatibility_on;
-	boolean_t		laa_is_present;
-	boolean_t		phy_reset_disable;
-	boolean_t		initialize_hw_bits_disable;
-	boolean_t		fc_send_xon;
-	boolean_t		fc_strict_ieee;
-	boolean_t		report_tx_early;
-	boolean_t		adaptive_ifs;
-	boolean_t		ifs_params_forced;
-	boolean_t		in_ifs_mode;
-	boolean_t		mng_reg_access_disabled;
-	boolean_t		leave_av_bit_off;
-	boolean_t		kmrn_lock_loss_workaround_disabled;
-	boolean_t		bad_tx_carr_stats_fd;
-	boolean_t		has_manc2h;
-	boolean_t		rx_needs_kicking;
-	boolean_t		has_smbus;
+	bool			get_link_status;
+	bool			serdes_link_down;
+	bool			tbi_compatibility_en;
+	bool			tbi_compatibility_on;
+	bool			laa_is_present;
+	bool			phy_reset_disable;
+	bool			initialize_hw_bits_disable;
+	bool			fc_send_xon;
+	bool			fc_strict_ieee;
+	bool			report_tx_early;
+	bool			adaptive_ifs;
+	bool			ifs_params_forced;
+	bool			in_ifs_mode;
+	bool			mng_reg_access_disabled;
+	bool			leave_av_bit_off;
+	bool			kmrn_lock_loss_workaround_disabled;
+	bool			bad_tx_carr_stats_fd;
+	bool			has_manc2h;
+	bool			rx_needs_kicking;
+	bool			has_smbus;
 };
 
 
@@ -2165,14 +2165,14 @@ typedef enum {
 #define E1000_HI_COMMAND_TIMEOUT         500 /* Time in ms to process HI command */
 
 struct e1000_host_command_header {
-    uint8_t command_id;
-    uint8_t command_length;
-    uint8_t command_options;   /* I/F bits for command, status for return */
-    uint8_t checksum;
+    u8 command_id;
+    u8 command_length;
+    u8 command_options;   /* I/F bits for command, status for return */
+    u8 checksum;
 };
 struct e1000_host_command_info {
     struct e1000_host_command_header command_header;  /* Command Head/Command Result Head has 4 bytes */
-    uint8_t command_data[E1000_HI_MAX_DATA_LENGTH];   /* Command data can length 0..252 */
+    u8 command_data[E1000_HI_MAX_DATA_LENGTH];   /* Command data can length 0..252 */
 };
 
 /* Host SMB register #0 */
@@ -2495,7 +2495,7 @@ struct e1000_host_command_info {
 /* Number of milliseconds we wait for PHY configuration done after MAC reset */
 #define PHY_CFG_TIMEOUT             100
 
-#define E1000_TX_BUFFER_SIZE ((uint32_t)1514)
+#define E1000_TX_BUFFER_SIZE ((u32)1514)
 
 /* The carrier extension symbol, as received by the NIC. */
 #define CARRIER_EXTENSION   0x0F
@@ -2518,11 +2518,11 @@ struct e1000_host_command_info {
  * Typical use:
  *  ...
  *  if (TBI_ACCEPT) {
- *      accept_frame = TRUE;
+ *      accept_frame = true;
  *      e1000_tbi_adjust_stats(adapter, MacAddress);
  *      frame_length--;
  *  } else {
- *      accept_frame = FALSE;
+ *      accept_frame = false;
  *  }
  *  ...
  */
@@ -3312,68 +3312,68 @@ struct e1000_host_command_info {
 /* Offset 04h HSFSTS */
 union ich8_hws_flash_status {
     struct ich8_hsfsts {
-#ifdef E1000_BIG_ENDIAN
-        uint16_t reserved2      :6;
-        uint16_t fldesvalid     :1;
-        uint16_t flockdn        :1;
-        uint16_t flcdone        :1;
-        uint16_t flcerr         :1;
-        uint16_t dael           :1;
-        uint16_t berasesz       :2;
-        uint16_t flcinprog      :1;
-        uint16_t reserved1      :2;
+#ifdef __BIG_ENDIAN
+        u16 reserved2      :6;
+        u16 fldesvalid     :1;
+        u16 flockdn        :1;
+        u16 flcdone        :1;
+        u16 flcerr         :1;
+        u16 dael           :1;
+        u16 berasesz       :2;
+        u16 flcinprog      :1;
+        u16 reserved1      :2;
 #else
-        uint16_t flcdone        :1;   /* bit 0 Flash Cycle Done */
-        uint16_t flcerr         :1;   /* bit 1 Flash Cycle Error */
-        uint16_t dael           :1;   /* bit 2 Direct Access error Log */
-        uint16_t berasesz       :2;   /* bit 4:3 Block/Sector Erase Size */
-        uint16_t flcinprog      :1;   /* bit 5 flash SPI cycle in Progress */
-        uint16_t reserved1      :2;   /* bit 13:6 Reserved */
-        uint16_t reserved2      :6;   /* bit 13:6 Reserved */
-        uint16_t fldesvalid     :1;   /* bit 14 Flash Descriptor Valid */
-        uint16_t flockdn        :1;   /* bit 15 Flash Configuration Lock-Down */
+        u16 flcdone        :1;   /* bit 0 Flash Cycle Done */
+        u16 flcerr         :1;   /* bit 1 Flash Cycle Error */
+        u16 dael           :1;   /* bit 2 Direct Access error Log */
+        u16 berasesz       :2;   /* bit 4:3 Block/Sector Erase Size */
+        u16 flcinprog      :1;   /* bit 5 flash SPI cycle in Progress */
+        u16 reserved1      :2;   /* bit 13:6 Reserved */
+        u16 reserved2      :6;   /* bit 13:6 Reserved */
+        u16 fldesvalid     :1;   /* bit 14 Flash Descriptor Valid */
+        u16 flockdn        :1;   /* bit 15 Flash Configuration Lock-Down */
 #endif
     } hsf_status;
-    uint16_t regval;
+    u16 regval;
 };
 
 /* ICH8 GbE Flash Hardware Sequencing Flash control Register bit breakdown */
 /* Offset 06h FLCTL */
 union ich8_hws_flash_ctrl {
     struct ich8_hsflctl {
-#ifdef E1000_BIG_ENDIAN
-        uint16_t fldbcount      :2;
-        uint16_t flockdn        :6;
-        uint16_t flcgo          :1;
-        uint16_t flcycle        :2;
-        uint16_t reserved       :5;
+#ifdef __BIG_ENDIAN
+        u16 fldbcount      :2;
+        u16 flockdn        :6;
+        u16 flcgo          :1;
+        u16 flcycle        :2;
+        u16 reserved       :5;
 #else
-        uint16_t flcgo          :1;   /* 0 Flash Cycle Go */
-        uint16_t flcycle        :2;   /* 2:1 Flash Cycle */
-        uint16_t reserved       :5;   /* 7:3 Reserved  */
-        uint16_t fldbcount      :2;   /* 9:8 Flash Data Byte Count */
-        uint16_t flockdn        :6;   /* 15:10 Reserved */
+        u16 flcgo          :1;   /* 0 Flash Cycle Go */
+        u16 flcycle        :2;   /* 2:1 Flash Cycle */
+        u16 reserved       :5;   /* 7:3 Reserved  */
+        u16 fldbcount      :2;   /* 9:8 Flash Data Byte Count */
+        u16 flockdn        :6;   /* 15:10 Reserved */
 #endif
     } hsf_ctrl;
-    uint16_t regval;
+    u16 regval;
 };
 
 /* ICH8 Flash Region Access Permissions */
 union ich8_hws_flash_regacc {
     struct ich8_flracc {
-#ifdef E1000_BIG_ENDIAN
-        uint32_t gmwag          :8;
-        uint32_t gmrag          :8;
-        uint32_t grwa           :8;
-        uint32_t grra           :8;
+#ifdef __BIG_ENDIAN
+        u32 gmwag          :8;
+        u32 gmrag          :8;
+        u32 grwa           :8;
+        u32 grra           :8;
 #else
-        uint32_t grra           :8;   /* 0:7 GbE region Read Access */
-        uint32_t grwa           :8;   /* 8:15 GbE region Write Access */
-        uint32_t gmrag          :8;   /* 23:16 GbE Master Read Access Grant  */
-        uint32_t gmwag          :8;   /* 31:24 GbE Master Write Access Grant */
+        u32 grra           :8;   /* 0:7 GbE region Read Access */
+        u32 grwa           :8;   /* 8:15 GbE region Write Access */
+        u32 gmrag          :8;   /* 23:16 GbE Master Read Access Grant  */
+        u32 gmwag          :8;   /* 31:24 GbE Master Write Access Grant */
 #endif
     } hsf_flregacc;
-    uint16_t regval;
+    u16 regval;
 };
 
 /* Miscellaneous PHY bit definitions. */
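The ICH8 flash definitions keep the union-of-bitfields idiom: the inner struct names individual bits of HSFSTS/HSFCTL while regval lets the whole register be read or written in one access, and the #ifdef __BIG_ENDIAN branch reverses the field order so the bit positions survive on either endianness. A standalone sketch of how such a union is typically consumed — little-endian layout only, and the helper around it is hypothetical:

typedef unsigned short u16;	/* stand-in for the kernel type */

union hsfsts {
	struct {
		u16 flcdone    :1;	/* bit 0  Flash Cycle Done  */
		u16 flcerr     :1;	/* bit 1  Flash Cycle Error */
		u16 dael       :1;
		u16 berasesz   :2;
		u16 flcinprog  :1;
		u16 reserved1  :2;
		u16 reserved2  :6;
		u16 fldesvalid :1;
		u16 flockdn    :1;	/* bit 15 Flash Configuration Lock-Down */
	} hsf_status;
	u16 regval;
};

/* nonzero once a flash cycle has completed without error */
static int flash_cycle_done(u16 raw)
{
	union hsfsts status;

	status.regval = raw;	/* one 16-bit register read fills every field */
	return status.hsf_status.flcdone && !status.hsf_status.flcerr;
}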

+ 147 - 158
drivers/net/e1000/e1000_main.c

@@ -127,7 +127,7 @@ int e1000_up(struct e1000_adapter *adapter);
 void e1000_down(struct e1000_adapter *adapter);
 void e1000_reinit_locked(struct e1000_adapter *adapter);
 void e1000_reset(struct e1000_adapter *adapter);
-int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
+int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
 void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
@@ -169,21 +169,21 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
 static int e1000_set_mac(struct net_device *netdev, void *p);
 static irqreturn_t e1000_intr(int irq, void *data);
 static irqreturn_t e1000_intr_msi(int irq, void *data);
-static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
-                                    struct e1000_tx_ring *tx_ring);
+static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
+			       struct e1000_tx_ring *tx_ring);
 #ifdef CONFIG_E1000_NAPI
 static int e1000_clean(struct napi_struct *napi, int budget);
-static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
-                                    struct e1000_rx_ring *rx_ring,
-                                    int *work_done, int work_to_do);
-static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
-                                       struct e1000_rx_ring *rx_ring,
-                                       int *work_done, int work_to_do);
+static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+			       struct e1000_rx_ring *rx_ring,
+			       int *work_done, int work_to_do);
+static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+				  struct e1000_rx_ring *rx_ring,
+				  int *work_done, int work_to_do);
 #else
-static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
-                                    struct e1000_rx_ring *rx_ring);
-static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
-                                       struct e1000_rx_ring *rx_ring);
+static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+			       struct e1000_rx_ring *rx_ring);
+static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+				  struct e1000_rx_ring *rx_ring);
 #endif
 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
                                    struct e1000_rx_ring *rx_ring,
@@ -203,8 +203,8 @@ static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
                                        struct sk_buff *skb);
 
 static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
-static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
-static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
+static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
 static void e1000_restore_vlan(struct e1000_adapter *adapter);
 
 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
@@ -347,7 +347,6 @@ static void e1000_free_irq(struct e1000_adapter *adapter)
 static void
 e1000_irq_disable(struct e1000_adapter *adapter)
 {
-	atomic_inc(&adapter->irq_sem);
 	E1000_WRITE_REG(&adapter->hw, IMC, ~0);
 	E1000_WRITE_FLUSH(&adapter->hw);
 	synchronize_irq(adapter->pdev->irq);
@@ -361,18 +360,16 @@ e1000_irq_disable(struct e1000_adapter *adapter)
 static void
 e1000_irq_enable(struct e1000_adapter *adapter)
 {
-	if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
-		E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
-		E1000_WRITE_FLUSH(&adapter->hw);
-	}
+	E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
+	E1000_WRITE_FLUSH(&adapter->hw);
 }
 
 static void
 e1000_update_mng_vlan(struct e1000_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
-	uint16_t vid = adapter->hw.mng_cookie.vlan_id;
-	uint16_t old_vid = adapter->mng_vlan_id;
+	u16 vid = adapter->hw.mng_cookie.vlan_id;
+	u16 old_vid = adapter->mng_vlan_id;
 	if (adapter->vlgrp) {
 		if (!vlan_group_get_device(adapter->vlgrp, vid)) {
 			if (adapter->hw.mng_cookie.status &
@@ -382,7 +379,7 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
 			} else
 				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
 
-			if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
+			if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
 					(vid != old_vid) &&
 			    !vlan_group_get_device(adapter->vlgrp, old_vid))
 				e1000_vlan_rx_kill_vid(netdev, old_vid);
@@ -405,8 +402,8 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
 static void
 e1000_release_hw_control(struct e1000_adapter *adapter)
 {
-	uint32_t ctrl_ext;
-	uint32_t swsm;
+	u32 ctrl_ext;
+	u32 swsm;
 
 	/* Let firmware taken over control of h/w */
 	switch (adapter->hw.mac_type) {
@@ -442,8 +439,8 @@ e1000_release_hw_control(struct e1000_adapter *adapter)
 static void
 e1000_get_hw_control(struct e1000_adapter *adapter)
 {
-	uint32_t ctrl_ext;
-	uint32_t swsm;
+	u32 ctrl_ext;
+	u32 swsm;
 
 	/* Let firmware know the driver has taken over */
 	switch (adapter->hw.mac_type) {
@@ -469,7 +466,7 @@ static void
 e1000_init_manageability(struct e1000_adapter *adapter)
 {
 	if (adapter->en_mng_pt) {
-		uint32_t manc = E1000_READ_REG(&adapter->hw, MANC);
+		u32 manc = E1000_READ_REG(&adapter->hw, MANC);
 
 		/* disable hardware interception of ARP */
 		manc &= ~(E1000_MANC_ARP_EN);
@@ -478,7 +475,7 @@ e1000_init_manageability(struct e1000_adapter *adapter)
 		/* this will probably generate destination unreachable messages
 		 * from the host OS, but the packets will be handled on SMBUS */
 		if (adapter->hw.has_manc2h) {
-			uint32_t manc2h = E1000_READ_REG(&adapter->hw, MANC2H);
+			u32 manc2h = E1000_READ_REG(&adapter->hw, MANC2H);
 
 			manc |= E1000_MANC_EN_MNG2HOST;
 #define E1000_MNG2HOST_PORT_623 (1 << 5)
@@ -496,7 +493,7 @@ static void
 e1000_release_manageability(struct e1000_adapter *adapter)
 {
 	if (adapter->en_mng_pt) {
-		uint32_t manc = E1000_READ_REG(&adapter->hw, MANC);
+		u32 manc = E1000_READ_REG(&adapter->hw, MANC);
 
 		/* re-enable hardware interception of ARP */
 		manc |= E1000_MANC_ARP_EN;
@@ -569,7 +566,7 @@ int e1000_up(struct e1000_adapter *adapter)
 
 void e1000_power_up_phy(struct e1000_adapter *adapter)
 {
-	uint16_t mii_reg = 0;
+	u16 mii_reg = 0;
 
 	/* Just clear the power down bit to wake the phy back up */
 	if (adapter->hw.media_type == e1000_media_type_copper) {
@@ -584,13 +581,13 @@ void e1000_power_up_phy(struct e1000_adapter *adapter)
 static void e1000_power_down_phy(struct e1000_adapter *adapter)
 {
 	/* Power down the PHY so no link is implied when interface is down *
-	 * The PHY cannot be powered down if any of the following is TRUE *
+	 * The PHY cannot be powered down if any of the following is true *
 	 * (a) WoL is enabled
 	 * (b) AMT is active
 	 * (c) SoL/IDER session is active */
 	if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
 	   adapter->hw.media_type == e1000_media_type_copper) {
-		uint16_t mii_reg = 0;
+		u16 mii_reg = 0;
 
 		switch (adapter->hw.mac_type) {
 		case e1000_82540:
@@ -638,7 +635,6 @@ e1000_down(struct e1000_adapter *adapter)
 
 #ifdef CONFIG_E1000_NAPI
 	napi_disable(&adapter->napi);
-	atomic_set(&adapter->irq_sem, 0);
 #endif
 	e1000_irq_disable(adapter);
 
@@ -671,9 +667,9 @@ e1000_reinit_locked(struct e1000_adapter *adapter)
 void
 e1000_reset(struct e1000_adapter *adapter)
 {
-	uint32_t pba = 0, tx_space, min_tx_space, min_rx_space;
-	uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
-	boolean_t legacy_pba_adjust = FALSE;
+	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
+	u16 fc_high_water_mark = E1000_FC_HIGH_DIFF;
+	bool legacy_pba_adjust = false;
 
 	/* Repartition Pba for greater than 9k mtu
 	 * To take effect CTRL.RST is required.
@@ -687,7 +683,7 @@ e1000_reset(struct e1000_adapter *adapter)
 	case e1000_82540:
 	case e1000_82541:
 	case e1000_82541_rev_2:
-		legacy_pba_adjust = TRUE;
+		legacy_pba_adjust = true;
 		pba = E1000_PBA_48K;
 		break;
 	case e1000_82545:
@@ -698,7 +694,7 @@ e1000_reset(struct e1000_adapter *adapter)
 		break;
 	case e1000_82547:
 	case e1000_82547_rev_2:
-		legacy_pba_adjust = TRUE;
+		legacy_pba_adjust = true;
 		pba = E1000_PBA_30K;
 		break;
 	case e1000_82571:
@@ -716,7 +712,7 @@ e1000_reset(struct e1000_adapter *adapter)
 		break;
 	}
 
-	if (legacy_pba_adjust == TRUE) {
+	if (legacy_pba_adjust) {
 		if (adapter->netdev->mtu > E1000_RXBUFFER_8192)
 			pba -= 8; /* allocate more FIFO for Tx */
 
@@ -819,7 +815,7 @@ e1000_reset(struct e1000_adapter *adapter)
 	    adapter->hw.mac_type <= e1000_82547_rev_2 &&
 	    adapter->hw.autoneg == 1 &&
 	    adapter->hw.autoneg_advertised == ADVERTISE_1000_FULL) {
-		uint32_t ctrl = E1000_READ_REG(&adapter->hw, CTRL);
+		u32 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
 		/* clear phy power management bit if we are in gig only mode,
 		 * which if enabled will attempt negotiation to 100Mb, which
 		 * can cause a loss of link at power off or driver unload */
@@ -836,7 +832,7 @@ e1000_reset(struct e1000_adapter *adapter)
 	if (!adapter->smart_power_down &&
 	    (adapter->hw.mac_type == e1000_82571 ||
 	     adapter->hw.mac_type == e1000_82572)) {
-		uint16_t phy_data = 0;
+		u16 phy_data = 0;
 		/* speed up time to link by disabling smart power down, ignore
 		 * the return value of this function because there is nothing
 		 * different we would do if it failed */
@@ -930,8 +926,8 @@ e1000_probe(struct pci_dev *pdev,
 	static int cards_found = 0;
 	static int global_quad_port_a = 0; /* global ksp3 port a indication */
 	int i, err, pci_using_dac;
-	uint16_t eeprom_data = 0;
-	uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
+	u16 eeprom_data = 0;
+	u16 eeprom_apme_mask = E1000_EEPROM_APME;
 	DECLARE_MAC_BUF(mac);
 
 	if ((err = pci_enable_device(pdev)))
@@ -1366,15 +1362,15 @@ e1000_sw_init(struct e1000_adapter *adapter)
 
 	e1000_set_media_type(hw);
 
-	hw->wait_autoneg_complete = FALSE;
-	hw->tbi_compatibility_en = TRUE;
-	hw->adaptive_ifs = TRUE;
+	hw->wait_autoneg_complete = false;
+	hw->tbi_compatibility_en = true;
+	hw->adaptive_ifs = true;
 
 	/* Copper options */
 
 	if (hw->media_type == e1000_media_type_copper) {
 		hw->mdix = AUTO_ALL_MODES;
-		hw->disable_polarity_correction = FALSE;
+		hw->disable_polarity_correction = false;
 		hw->master_slave = E1000_MASTER_SLAVE;
 	}
 
@@ -1396,7 +1392,6 @@ e1000_sw_init(struct e1000_adapter *adapter)
 #endif
 
 	/* Explicitly disable IRQ since the NIC can be in any state. */
-	atomic_set(&adapter->irq_sem, 0);
 	e1000_irq_disable(adapter);
 
 	spin_lock_init(&adapter->stats_lock);
@@ -1576,7 +1571,7 @@ e1000_close(struct net_device *netdev)
  * @start: address of beginning of memory
  * @len: length of memory
  **/
-static boolean_t
+static bool
 e1000_check_64k_bound(struct e1000_adapter *adapter,
 		      void *start, unsigned long len)
 {
@@ -1587,10 +1582,10 @@ e1000_check_64k_bound(struct e1000_adapter *adapter,
 	 * write location to cross 64k boundary due to errata 23 */
 	if (adapter->hw.mac_type == e1000_82545 ||
 	    adapter->hw.mac_type == e1000_82546) {
-		return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE;
+		return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
 	}
 
-	return TRUE;
+	return true;
 }
 
 /**
@@ -1707,10 +1702,10 @@ e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
 static void
 e1000_configure_tx(struct e1000_adapter *adapter)
 {
-	uint64_t tdba;
+	u64 tdba;
 	struct e1000_hw *hw = &adapter->hw;
-	uint32_t tdlen, tctl, tipg, tarc;
-	uint32_t ipgr1, ipgr2;
+	u32 tdlen, tctl, tipg, tarc;
+	u32 ipgr1, ipgr2;
 
 	/* Setup the HW Tx Head and Tail descriptor pointers */
 
@@ -1952,10 +1947,10 @@ e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
 static void
 e1000_setup_rctl(struct e1000_adapter *adapter)
 {
-	uint32_t rctl, rfctl;
-	uint32_t psrctl = 0;
+	u32 rctl, rfctl;
+	u32 psrctl = 0;
 #ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
-	uint32_t pages = 0;
+	u32 pages = 0;
 #endif
 
 	rctl = E1000_READ_REG(&adapter->hw, RCTL);
@@ -2070,9 +2065,9 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
 static void
 e1000_configure_rx(struct e1000_adapter *adapter)
 {
-	uint64_t rdba;
+	u64 rdba;
 	struct e1000_hw *hw = &adapter->hw;
-	uint32_t rdlen, rctl, rxcsum, ctrl_ext;
+	u32 rdlen, rctl, rxcsum, ctrl_ext;
 
 	if (adapter->rx_ps_pages) {
 		/* this is a 32 byte descriptor */
@@ -2133,7 +2128,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
 	if (hw->mac_type >= e1000_82543) {
 		rxcsum = E1000_READ_REG(hw, RXCSUM);
-		if (adapter->rx_csum == TRUE) {
+		if (adapter->rx_csum) {
 			rxcsum |= E1000_RXCSUM_TUOFL;
 
 			/* Enable 82571 IPv4 payload checksum for UDP fragments
@@ -2392,7 +2387,7 @@ static void
 e1000_enter_82542_rst(struct e1000_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
-	uint32_t rctl;
+	u32 rctl;
 
 	e1000_pci_clear_mwi(&adapter->hw);
 
@@ -2410,7 +2405,7 @@ static void
 e1000_leave_82542_rst(struct e1000_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
-	uint32_t rctl;
+	u32 rctl;
 
 	rctl = E1000_READ_REG(&adapter->hw, RCTL);
 	rctl &= ~E1000_RCTL_RST;
@@ -2495,8 +2490,8 @@ e1000_set_rx_mode(struct net_device *netdev)
 	struct e1000_hw *hw = &adapter->hw;
 	struct dev_addr_list *uc_ptr;
 	struct dev_addr_list *mc_ptr;
-	uint32_t rctl;
-	uint32_t hash_value;
+	u32 rctl;
+	u32 hash_value;
 	int i, rar_entries = E1000_RAR_ENTRIES;
 	int mta_reg_count = (hw->mac_type == e1000_ich8lan) ?
 				E1000_NUM_MTA_REGISTERS_ICH8LAN :
@@ -2600,7 +2595,7 @@ e1000_82547_tx_fifo_stall(unsigned long data)
 {
 	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
 	struct net_device *netdev = adapter->netdev;
-	uint32_t tctl;
+	u32 tctl;
 
 	if (atomic_read(&adapter->tx_fifo_stall)) {
 		if ((E1000_READ_REG(&adapter->hw, TDT) ==
@@ -2642,8 +2637,8 @@ e1000_watchdog(unsigned long data)
 	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
 	struct net_device *netdev = adapter->netdev;
 	struct e1000_tx_ring *txdr = adapter->tx_ring;
-	uint32_t link, tctl;
-	int32_t ret_val;
+	u32 link, tctl;
+	s32 ret_val;
 
 	ret_val = e1000_check_for_link(&adapter->hw);
 	if ((ret_val == E1000_ERR_PHY) &&
@@ -2668,8 +2663,8 @@ e1000_watchdog(unsigned long data)
 
 	if (link) {
 		if (!netif_carrier_ok(netdev)) {
-			uint32_t ctrl;
-			boolean_t txb2b = 1;
+			u32 ctrl;
+			bool txb2b = true;
 			e1000_get_speed_and_duplex(&adapter->hw,
 			                           &adapter->link_speed,
 			                           &adapter->link_duplex);
@@ -2691,12 +2686,12 @@ e1000_watchdog(unsigned long data)
 			adapter->tx_timeout_factor = 1;
 			switch (adapter->link_speed) {
 			case SPEED_10:
-				txb2b = 0;
+				txb2b = false;
 				netdev->tx_queue_len = 10;
 				adapter->tx_timeout_factor = 8;
 				break;
 			case SPEED_100:
-				txb2b = 0;
+				txb2b = false;
 				netdev->tx_queue_len = 100;
 				/* maybe add some timeout factor ? */
 				break;
@@ -2704,8 +2699,8 @@ e1000_watchdog(unsigned long data)
 
 			if ((adapter->hw.mac_type == e1000_82571 ||
 			     adapter->hw.mac_type == e1000_82572) &&
-			    txb2b == 0) {
-				uint32_t tarc0;
+			    !txb2b) {
+				u32 tarc0;
 				tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
 				tarc0 &= ~(1 << 21);
 				E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
@@ -2747,7 +2742,7 @@ e1000_watchdog(unsigned long data)
 			/* make sure the receive unit is started */
 			if (adapter->hw.rx_needs_kicking) {
 				struct e1000_hw *hw = &adapter->hw;
-				uint32_t rctl = E1000_READ_REG(hw, RCTL);
+				u32 rctl = E1000_READ_REG(hw, RCTL);
 				E1000_WRITE_REG(hw, RCTL, rctl | E1000_RCTL_EN);
 			}
 		}
@@ -2802,7 +2797,7 @@ e1000_watchdog(unsigned long data)
 	E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
 
 	/* Force detection of hung controller every watchdog period */
-	adapter->detect_tx_hung = TRUE;
+	adapter->detect_tx_hung = true;
 
 	/* With 82571 controllers, LAA may be overwritten due to controller
 	 * reset from the other port. Set the appropriate LAA in RAR[0] */
@@ -2837,7 +2832,7 @@ enum latency_range {
  * @bytes: the number of bytes during this measurement interval
  **/
 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
-                                   uint16_t itr_setting,
+                                   u16 itr_setting,
                                    int packets,
                                    int bytes)
 {
@@ -2889,8 +2884,8 @@ update_itr_done:
 static void e1000_set_itr(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
-	uint16_t current_itr;
-	uint32_t new_itr = adapter->itr;
+	u16 current_itr;
+	u32 new_itr = adapter->itr;
 
 	if (unlikely(hw->mac_type < e1000_82540))
 		return;
@@ -2964,9 +2959,9 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 	struct e1000_context_desc *context_desc;
 	struct e1000_buffer *buffer_info;
 	unsigned int i;
-	uint32_t cmd_length = 0;
-	uint16_t ipcse = 0, tucse, mss;
-	uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
+	u32 cmd_length = 0;
+	u16 ipcse = 0, tucse, mss;
+	u8 ipcss, ipcso, tucss, tucso, hdr_len;
 	int err;
 
 	if (skb_is_gso(skb)) {
@@ -3025,19 +3020,19 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 		if (++i == tx_ring->count) i = 0;
 		tx_ring->next_to_use = i;
 
-		return TRUE;
+		return true;
 	}
-	return FALSE;
+	return false;
 }
 
-static boolean_t
+static bool
 e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
               struct sk_buff *skb)
 {
 	struct e1000_context_desc *context_desc;
 	struct e1000_buffer *buffer_info;
 	unsigned int i;
-	uint8_t css;
+	u8 css;
 
 	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
 		css = skb_transport_offset(skb);
@@ -3060,10 +3055,10 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 		if (unlikely(++i == tx_ring->count)) i = 0;
 		tx_ring->next_to_use = i;
 
-		return TRUE;
+		return true;
 	}
 
-	return FALSE;
+	return false;
 }
 
 #define E1000_MAX_TXD_PWR	12
@@ -3182,7 +3177,7 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 {
 	struct e1000_tx_desc *tx_desc = NULL;
 	struct e1000_buffer *buffer_info;
-	uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
+	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
 	unsigned int i;
 
 	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
@@ -3246,8 +3241,8 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 static int
 e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
 {
-	uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
-	uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR;
+	u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
+	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
 
 	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
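ALIGN() rounds the frame length up to the next multiple of the FIFO header size before the free-space check. Worked example with an assumed 16-byte E1000_FIFO_HDR (the kernel macro requires a power-of-two alignment):

/* user-space restatement of the kernel macro for power-of-two alignments */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* a 1514-byte frame with the assumed 16-byte FIFO header:
 *   skb_fifo_len = 1514 + 16       = 1530
 *   ALIGN_UP(1530, 16)             = 1536 bytes actually consumed in the FIFO
 */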
 
@@ -3274,7 +3269,7 @@ static int
 e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
 {
 	struct e1000_hw *hw =  &adapter->hw;
-	uint16_t length, offset;
+	u16 length, offset;
 	if (vlan_tx_tag_present(skb)) {
 		if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
 			( adapter->hw.mng_cookie.status &
@@ -3285,17 +3280,17 @@ e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
 		struct ethhdr *eth = (struct ethhdr *) skb->data;
 		if ((htons(ETH_P_IP) == eth->h_proto)) {
 			const struct iphdr *ip =
-				(struct iphdr *)((uint8_t *)skb->data+14);
+				(struct iphdr *)((u8 *)skb->data+14);
 			if (IPPROTO_UDP == ip->protocol) {
 				struct udphdr *udp =
-					(struct udphdr *)((uint8_t *)ip +
+					(struct udphdr *)((u8 *)ip +
 						(ip->ihl << 2));
 				if (ntohs(udp->dest) == 67) {
-					offset = (uint8_t *)udp + 8 - skb->data;
+					offset = (u8 *)udp + 8 - skb->data;
 					length = skb->len - offset;
 
 					return e1000_mng_write_dhcp_info(hw,
-							(uint8_t *)udp + 8,
+							(u8 *)udp + 8,
 							length);
 				}
 			}
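e1000_transfer_dhcp_info() walks from the Ethernet payload to the UDP header by hand: the IPv4 header starts 14 bytes into skb->data, its length is ihl 32-bit words, so the UDP header sits at (u8 *)ip + (ip->ihl << 2), and destination port 67 (BOOTP/DHCP server) marks frames the management firmware wants to see. A hedged user-space sketch of the same offset arithmetic on a raw buffer — no skb, and the two minimal header structs are stand-ins, not the kernel's:

#include <stddef.h>
#include <stdint.h>
#include <arpa/inet.h>		/* ntohs() */

struct ip_min  { uint8_t ver_ihl; uint8_t rest[19]; };		/* 20-byte minimum IPv4 header */
struct udp_min { uint16_t source, dest, len, check; };

/* returns the UDP header if the frame is long enough and addressed to port 67;
 * the EtherType and ip->protocol checks the driver performs are omitted here */
static const struct udp_min *find_dhcp_udp(const uint8_t *frame, size_t len)
{
	const struct ip_min *ip;
	const struct udp_min *udp;
	size_t ihl_bytes;

	if (len < 14 + sizeof(*ip))
		return NULL;
	ip = (const struct ip_min *)(frame + 14);	/* skip the Ethernet header */
	ihl_bytes = (size_t)(ip->ver_ihl & 0x0f) << 2;	/* ihl counts 32-bit words */
	if (len < 14 + ihl_bytes + sizeof(*udp))
		return NULL;
	udp = (const struct udp_min *)((const uint8_t *)ip + ihl_bytes);
	return ntohs(udp->dest) == 67 ? udp : NULL;
}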
@@ -3375,7 +3370,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	 * overrun the FIFO, adjust the max buffer len if mss
 	 * drops. */
 	if (mss) {
-		uint8_t hdr_len;
+		u8 hdr_len;
 		max_per_txd = min(mss << 2, max_per_txd);
 		max_txd_pwr = fls(max_per_txd) - 1;
 
@@ -3562,7 +3557,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
-	uint16_t eeprom_data = 0;
+	u16 eeprom_data = 0;
 
 	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
 	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
@@ -3657,7 +3652,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 	struct pci_dev *pdev = adapter->pdev;
 	unsigned long flags;
-	uint16_t phy_tmp;
+	u16 phy_tmp;
 
 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
 
@@ -3834,13 +3829,10 @@ e1000_intr_msi(int irq, void *data)
 #ifndef CONFIG_E1000_NAPI
 	int i;
 #endif
-	uint32_t icr = E1000_READ_REG(hw, ICR);
+	u32 icr = E1000_READ_REG(hw, ICR);
+
+	/* in NAPI mode read ICR disables interrupts using IAM */
 
-#ifdef CONFIG_E1000_NAPI
-	/* read ICR disables interrupts using IAM, so keep up with our
-	 * enable/disable accounting */
-	atomic_inc(&adapter->irq_sem);
-#endif
 	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
 		hw->get_link_status = 1;
 		/* 80003ES2LAN workaround-- For packet buffer work-around on
@@ -3849,7 +3841,7 @@ e1000_intr_msi(int irq, void *data)
 		if (netif_carrier_ok(netdev) &&
 		    (adapter->hw.mac_type == e1000_80003es2lan)) {
 			/* disable receives */
-			uint32_t rctl = E1000_READ_REG(hw, RCTL);
+			u32 rctl = E1000_READ_REG(hw, RCTL);
 			E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
 		}
 		/* guard against interrupt when we're going down */
@@ -3896,7 +3888,7 @@ e1000_intr(int irq, void *data)
 	struct net_device *netdev = data;
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	uint32_t rctl, icr = E1000_READ_REG(hw, ICR);
+	u32 rctl, icr = E1000_READ_REG(hw, ICR);
 #ifndef CONFIG_E1000_NAPI
 	int i;
 #endif
@@ -3910,12 +3902,8 @@ e1000_intr(int irq, void *data)
 	             !(icr & E1000_ICR_INT_ASSERTED)))
 		return IRQ_NONE;
 
-	/* Interrupt Auto-Mask...upon reading ICR,
-	 * interrupts are masked.  No need for the
-	 * IMC write, but it does mean we should
-	 * account for it ASAP. */
-	if (likely(hw->mac_type >= e1000_82571))
-		atomic_inc(&adapter->irq_sem);
+	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
+	 * need for the IMC write */
 #endif
 
 	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
@@ -3939,7 +3927,6 @@ e1000_intr(int irq, void *data)
 #ifdef CONFIG_E1000_NAPI
 	if (unlikely(hw->mac_type < e1000_82571)) {
 		/* disable interrupts, without the synchronize_irq bit */
-		atomic_inc(&adapter->irq_sem);
 		E1000_WRITE_REG(hw, IMC, ~0);
 		E1000_WRITE_FLUSH(hw);
 	}
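These interrupt-path hunks retire the irq_sem counter: on 82571 and later parts, reading ICR with Interrupt Auto-Mask active already masks further interrupts, and e1000_irq_disable()/e1000_irq_enable() now write IMC/IMS unconditionally instead of reference-counting enable requests. A stripped-down sketch of the resulting pair, with stub register writers in place of E1000_WRITE_REG/E1000_WRITE_FLUSH and an illustrative (not authoritative) enable mask:

#include <stdio.h>

static void write_reg(const char *reg, unsigned int val)
{
	printf("write %s = 0x%08x\n", reg, val);	/* stub for E1000_WRITE_REG */
}
static void write_flush(void) { }			/* stub for E1000_WRITE_FLUSH */

#define IMS_ENABLE_MASK	0x0000009du	/* example value only */

static void irq_disable(void)
{
	write_reg("IMC", ~0u);		/* mask every interrupt cause */
	write_flush();			/* post the write before returning */
}

static void irq_enable(void)
{
	write_reg("IMS", IMS_ENABLE_MASK);	/* unmask the causes the driver handles */
	write_flush();
}

int main(void)
{
	irq_disable();	/* with no nesting counter, callers must pair these themselves */
	irq_enable();
	return 0;
}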
@@ -3964,10 +3951,8 @@ e1000_intr(int irq, void *data)
 	 * in dead lock. Writing IMC forces 82547 into
 	 * de-assertion state.
 	 */
-	if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) {
-		atomic_inc(&adapter->irq_sem);
+	if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
 		E1000_WRITE_REG(hw, IMC, ~0);
-	}
 
 	adapter->total_tx_bytes = 0;
 	adapter->total_rx_bytes = 0;
@@ -4038,7 +4023,7 @@ e1000_clean(struct napi_struct *napi, int budget)
  * @adapter: board private structure
  **/
 
-static boolean_t
+static bool
 e1000_clean_tx_irq(struct e1000_adapter *adapter,
                    struct e1000_tx_ring *tx_ring)
 {
@@ -4049,7 +4034,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
 #ifdef CONFIG_E1000_NAPI
 	unsigned int count = 0;
 #endif
-	boolean_t cleaned = FALSE;
+	bool cleaned = false;
 	unsigned int total_tx_bytes=0, total_tx_packets=0;
 
 	i = tx_ring->next_to_clean;
@@ -4057,7 +4042,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
 	eop_desc = E1000_TX_DESC(*tx_ring, eop);
 
 	while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
-		for (cleaned = FALSE; !cleaned; ) {
+		for (cleaned = false; !cleaned; ) {
 			tx_desc = E1000_TX_DESC(*tx_ring, i);
 			buffer_info = &tx_ring->buffer_info[i];
 			cleaned = (i == eop);
@@ -4105,7 +4090,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
 	if (adapter->detect_tx_hung) {
 		/* Detect a transmit hang in hardware, this serializes the
 		 * check with the clearing of time_stamp and movement of i */
-		adapter->detect_tx_hung = FALSE;
+		adapter->detect_tx_hung = false;
 		if (tx_ring->buffer_info[eop].dma &&
 		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
 		               (adapter->tx_timeout_factor * HZ))
@@ -4154,11 +4139,11 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
 
 static void
 e1000_rx_checksum(struct e1000_adapter *adapter,
-		  uint32_t status_err, uint32_t csum,
+		  u32 status_err, u32 csum,
 		  struct sk_buff *skb)
 {
-	uint16_t status = (uint16_t)status_err;
-	uint8_t errors = (uint8_t)(status_err >> 24);
+	u16 status = (u16)status_err;
+	u8 errors = (u8)(status_err >> 24);
 	skb->ip_summed = CHECKSUM_NONE;
 
 	/* 82543 or newer only */
@@ -4200,7 +4185,7 @@ e1000_rx_checksum(struct e1000_adapter *adapter,
  * @adapter: board private structure
  **/
 
-static boolean_t
+static bool
 #ifdef CONFIG_E1000_NAPI
 e1000_clean_rx_irq(struct e1000_adapter *adapter,
                    struct e1000_rx_ring *rx_ring,
@@ -4215,11 +4200,11 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 	struct e1000_rx_desc *rx_desc, *next_rxd;
 	struct e1000_buffer *buffer_info, *next_buffer;
 	unsigned long flags;
-	uint32_t length;
-	uint8_t last_byte;
+	u32 length;
+	u8 last_byte;
 	unsigned int i;
 	int cleaned_count = 0;
-	boolean_t cleaned = FALSE;
+	bool cleaned = false;
 	unsigned int total_rx_bytes=0, total_rx_packets=0;
 
 	i = rx_ring->next_to_clean;
@@ -4247,7 +4232,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 
 		next_buffer = &rx_ring->buffer_info[i];
 
-		cleaned = TRUE;
+		cleaned = true;
 		cleaned_count++;
 		pci_unmap_single(pdev,
 		                 buffer_info->dma,
@@ -4316,8 +4301,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 
 		/* Receive Checksum Offload */
 		e1000_rx_checksum(adapter,
-				  (uint32_t)(status) |
-				  ((uint32_t)(rx_desc->errors) << 24),
+				  (u32)(status) |
+				  ((u32)(rx_desc->errors) << 24),
 				  le16_to_cpu(rx_desc->csum), skb);
 
 		skb->protocol = eth_type_trans(skb, netdev);
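The call above packs the descriptor's 8-bit status and 8-bit error fields into one u32, (u32)status | ((u32)errors << 24), and e1000_rx_checksum() recovers them with the (u16)/(u8) casts shown earlier in this file. A tiny round-trip check with made-up values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t status = 0x03, errors = 0x40;	/* made-up descriptor fields */

	/* pack the two bytes the way the receive path hands them over */
	uint32_t status_err = (uint32_t)status | ((uint32_t)errors << 24);

	/* ...and unpack them exactly as e1000_rx_checksum() does */
	assert((uint16_t)status_err == status);
	assert((uint8_t)(status_err >> 24) == errors);
	return 0;
}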
@@ -4373,7 +4358,7 @@ next_desc:
  * @adapter: board private structure
  **/
 
-static boolean_t
+static bool
 #ifdef CONFIG_E1000_NAPI
 e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                       struct e1000_rx_ring *rx_ring,
@@ -4391,9 +4376,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 	struct e1000_ps_page_dma *ps_page_dma;
 	struct sk_buff *skb;
 	unsigned int i, j;
-	uint32_t length, staterr;
+	u32 length, staterr;
 	int cleaned_count = 0;
-	boolean_t cleaned = FALSE;
+	bool cleaned = false;
 	unsigned int total_rx_bytes=0, total_rx_packets=0;
 
 	i = rx_ring->next_to_clean;
@@ -4420,7 +4405,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 
 		next_buffer = &rx_ring->buffer_info[i];
 
-		cleaned = TRUE;
+		cleaned = true;
 		cleaned_count++;
 		pci_unmap_single(pdev, buffer_info->dma,
 				 buffer_info->length,
@@ -4774,8 +4759,8 @@ no_buffers:
 static void
 e1000_smartspeed(struct e1000_adapter *adapter)
 {
-	uint16_t phy_status;
-	uint16_t phy_ctrl;
+	u16 phy_status;
+	u16 phy_ctrl;
 
 	if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
 	   !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
@@ -4854,8 +4839,8 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct mii_ioctl_data *data = if_mii(ifr);
 	int retval;
-	uint16_t mii_reg;
-	uint16_t spddplx;
+	u16 mii_reg;
+	u16 spddplx;
 	unsigned long flags;
 
 	if (adapter->hw.media_type != e1000_media_type_copper)
@@ -4974,11 +4959,11 @@ e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
 	pcix_set_mmrbc(adapter->pdev, mmrbc);
 }
 
-int32_t
-e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
+s32
+e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
 {
     struct e1000_adapter *adapter = hw->back;
-    uint16_t cap_offset;
+    u16 cap_offset;
 
     cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
     if (!cap_offset)
@@ -4990,7 +4975,7 @@ e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
 }
 
 void
-e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
+e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
 {
 	outl(value, port);
 }
@@ -4999,9 +4984,10 @@ static void
 e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	uint32_t ctrl, rctl;
+	u32 ctrl, rctl;
 
-	e1000_irq_disable(adapter);
+	if (!test_bit(__E1000_DOWN, &adapter->flags))
+		e1000_irq_disable(adapter);
 	adapter->vlgrp = grp;
 
 	if (grp) {
@@ -5030,7 +5016,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 			rctl &= ~E1000_RCTL_VFE;
 			E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
 			if (adapter->mng_vlan_id !=
-			    (uint16_t)E1000_MNG_VLAN_NONE) {
+			    (u16)E1000_MNG_VLAN_NONE) {
 				e1000_vlan_rx_kill_vid(netdev,
 				                       adapter->mng_vlan_id);
 				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
@@ -5038,14 +5024,15 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 		}
 	}
 
-	e1000_irq_enable(adapter);
+	if (!test_bit(__E1000_DOWN, &adapter->flags))
+		e1000_irq_enable(adapter);
 }
 
 static void
-e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
+e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	uint32_t vfta, index;
+	u32 vfta, index;
 
 	if ((adapter->hw.mng_cookie.status &
 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
@@ -5059,14 +5046,16 @@ e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
 }
 
 static void
-e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
+e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	uint32_t vfta, index;
+	u32 vfta, index;
 
-	e1000_irq_disable(adapter);
+	if (!test_bit(__E1000_DOWN, &adapter->flags))
+		e1000_irq_disable(adapter);
 	vlan_group_set_device(adapter->vlgrp, vid, NULL);
-	e1000_irq_enable(adapter);
+	if (!test_bit(__E1000_DOWN, &adapter->flags))
+		e1000_irq_enable(adapter);
 
 	if ((adapter->hw.mng_cookie.status &
 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
@@ -5089,7 +5078,7 @@ e1000_restore_vlan(struct e1000_adapter *adapter)
 	e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
 
 	if (adapter->vlgrp) {
-		uint16_t vid;
+		u16 vid;
 		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
 			if (!vlan_group_get_device(adapter->vlgrp, vid))
 				continue;
@@ -5099,7 +5088,7 @@ e1000_restore_vlan(struct e1000_adapter *adapter)
 }
 
 int
-e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
+e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
 {
 	adapter->hw.autoneg = 0;
 
@@ -5140,8 +5129,8 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	uint32_t ctrl, ctrl_ext, rctl, status;
-	uint32_t wufc = adapter->wol;
+	u32 ctrl, ctrl_ext, rctl, status;
+	u32 wufc = adapter->wol;
 #ifdef CONFIG_PM
 	int retval = 0;
 #endif
@@ -5238,7 +5227,7 @@ e1000_resume(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	uint32_t err;
+	u32 err;
 
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
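The VLAN hunks above add !test_bit(__E1000_DOWN, &adapter->flags) guards around e1000_irq_disable()/e1000_irq_enable(), so the VLAN callbacks can no longer re-enable interrupts on an adapter that e1000_down() has already quiesced. A minimal sketch of that guard shape — plain C flags instead of the kernel's atomic bitops, names invented:

#define FLAG_DOWN	(1u << 0)	/* stand-in for the __E1000_DOWN bit */

struct adapter { unsigned long flags; };

static void irq_disable(struct adapter *a) { (void)a; /* write IMC, flush */ }
static void irq_enable(struct adapter *a)  { (void)a; /* write IMS, flush */ }

static void vlan_update(struct adapter *a)
{
	if (!(a->flags & FLAG_DOWN))
		irq_disable(a);		/* quiesce only while the NIC is still up */

	/* ... update the VLAN filter state here ... */

	if (!(a->flags & FLAG_DOWN))
		irq_enable(a);		/* never unmask a NIC that is going down */
}

int main(void)
{
	struct adapter a = { .flags = FLAG_DOWN };

	vlan_update(&a);	/* touches no interrupt registers while down */
	return 0;
}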

+ 0 - 7
drivers/net/e1000/e1000_osdep.h

@@ -41,13 +41,6 @@
 #include <linux/interrupt.h>
 #include <linux/sched.h>
 
-typedef enum {
-#undef FALSE
-    FALSE = 0,
-#undef TRUE
-    TRUE = 1
-} boolean_t;
-
 #ifdef DBG
 #define DEBUGOUT(S)		printk(KERN_DEBUG S "\n")
 #define DEBUGOUT1(S, A...)	printk(KERN_DEBUG S "\n", A)
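With every caller converted, the private boolean_t enum can finally go. The #undef lines it carried show why it was fragile: TRUE and FALSE are often macros in other headers, so the enum could only be declared after undefining them, whereas C99/kernel bool needs no such dance. Illustration only, not driver code:

#include <stdbool.h>

#define TRUE 1			/* pretend some unrelated header defined this */

/* the old osdep.h pattern: without the #undef the enumerator would expand
 * to "1 = 1" and fail to compile */
#undef TRUE
#undef FALSE
typedef enum { FALSE = 0, TRUE = 1 } boolean_t;

int main(void)
{
	boolean_t old_style = TRUE;
	bool new_style = true;		/* no macro games required */

	return (old_style == (boolean_t)new_style) ? 0 : 1;
}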

+ 98 - 65
drivers/net/e1000e/82571.c

@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -29,6 +29,9 @@
 /*
  * 82571EB Gigabit Ethernet Controller
  * 82571EB Gigabit Ethernet Controller (Fiber)
+ * 82571EB Dual Port Gigabit Mezzanine Adapter
+ * 82571EB Quad Port Gigabit Mezzanine Adapter
+ * 82571PT Gigabit PT Quad Port Server ExpressModule
  * 82572EI Gigabit Ethernet Controller (Copper)
  * 82572EI Gigabit Ethernet Controller (Fiber)
  * 82572EI Gigabit Ethernet Controller
@@ -72,7 +75,7 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
 	struct e1000_phy_info *phy = &hw->phy;
 	s32 ret_val;
 
-	if (hw->media_type != e1000_media_type_copper) {
+	if (hw->phy.media_type != e1000_media_type_copper) {
 		phy->type = e1000_phy_none;
 		return 0;
 	}
@@ -150,7 +153,8 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
 		if (((eecd >> 15) & 0x3) == 0x3) {
 			nvm->type = e1000_nvm_flash_hw;
 			nvm->word_size = 2048;
-			/* Autonomous Flash update bit must be cleared due
+			/*
+			 * Autonomous Flash update bit must be cleared due
 			 * to Flash update issue.
 			 */
 			eecd &= ~E1000_EECD_AUPDEN;
@@ -159,13 +163,18 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
 		}
 		/* Fall Through */
 	default:
-		nvm->type	= e1000_nvm_eeprom_spi;
+		nvm->type = e1000_nvm_eeprom_spi;
 		size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
 				  E1000_EECD_SIZE_EX_SHIFT);
-		/* Added to a constant, "size" becomes the left-shift value
+		/*
+		 * Added to a constant, "size" becomes the left-shift value
 		 * for setting word_size.
 		 */
 		size += NVM_WORD_SIZE_BASE_SHIFT;
+
+		/* EEPROM access above 16k is unsupported */
+		if (size > 14)
+			size = 14;
 		nvm->word_size	= 1 << size;
 		break;
 	}
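The added clamp caps the computed EEPROM size: the size field read from EECD is added to NVM_WORD_SIZE_BASE_SHIFT and used as a power of two, and anything above 2^14 = 16384 16-bit words is treated as 16K because larger parts cannot be addressed through this interface. Sketch of the arithmetic, assuming a base shift of 6 as in the e1000e headers:

/* hypothetical helper; only the arithmetic mirrors the hunk above */
static unsigned int nvm_word_size(unsigned int eecd_size_field)
{
	unsigned int size = eecd_size_field + 6;	/* NVM_WORD_SIZE_BASE_SHIFT, assumed 6 */

	if (size > 14)		/* "EEPROM access above 16k is unsupported" */
		size = 14;
	return 1u << size;	/* e.g. a field of 4 gives 1 << 10 = 1024 words */
}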
@@ -190,16 +199,16 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
 	case E1000_DEV_ID_82571EB_FIBER:
 	case E1000_DEV_ID_82572EI_FIBER:
 	case E1000_DEV_ID_82571EB_QUAD_FIBER:
-		hw->media_type = e1000_media_type_fiber;
+		hw->phy.media_type = e1000_media_type_fiber;
 		break;
 	case E1000_DEV_ID_82571EB_SERDES:
 	case E1000_DEV_ID_82572EI_SERDES:
 	case E1000_DEV_ID_82571EB_SERDES_DUAL:
 	case E1000_DEV_ID_82571EB_SERDES_QUAD:
-		hw->media_type = e1000_media_type_internal_serdes;
+		hw->phy.media_type = e1000_media_type_internal_serdes;
 		break;
 	default:
-		hw->media_type = e1000_media_type_copper;
+		hw->phy.media_type = e1000_media_type_copper;
 		break;
 	}
 
@@ -208,25 +217,28 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
 	/* Set rar entry count */
 	mac->rar_entry_count = E1000_RAR_ENTRIES;
 	/* Set if manageability features are enabled. */
-	mac->arc_subsystem_valid =
-		(er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0;
+	mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0;
 
 	/* check for link */
-	switch (hw->media_type) {
+	switch (hw->phy.media_type) {
 	case e1000_media_type_copper:
 		func->setup_physical_interface = e1000_setup_copper_link_82571;
 		func->check_for_link = e1000e_check_for_copper_link;
 		func->get_link_up_info = e1000e_get_speed_and_duplex_copper;
 		break;
 	case e1000_media_type_fiber:
-		func->setup_physical_interface = e1000_setup_fiber_serdes_link_82571;
+		func->setup_physical_interface =
+			e1000_setup_fiber_serdes_link_82571;
 		func->check_for_link = e1000e_check_for_fiber_link;
-		func->get_link_up_info = e1000e_get_speed_and_duplex_fiber_serdes;
+		func->get_link_up_info =
+			e1000e_get_speed_and_duplex_fiber_serdes;
 		break;
 	case e1000_media_type_internal_serdes:
-		func->setup_physical_interface = e1000_setup_fiber_serdes_link_82571;
+		func->setup_physical_interface =
+			e1000_setup_fiber_serdes_link_82571;
 		func->check_for_link = e1000e_check_for_serdes_link;
-		func->get_link_up_info = e1000e_get_speed_and_duplex_fiber_serdes;
+		func->get_link_up_info =
+			e1000e_get_speed_and_duplex_fiber_serdes;
 		break;
 	default:
 		return -E1000_ERR_CONFIG;
@@ -236,7 +248,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
 	return 0;
 }
 
-static s32 e1000_get_invariants_82571(struct e1000_adapter *adapter)
+static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	static int global_quad_port_a; /* global port a indication */
@@ -322,10 +334,12 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
 	switch (hw->mac.type) {
 	case e1000_82571:
 	case e1000_82572:
-		/* The 82571 firmware may still be configuring the PHY.
+		/*
+		 * The 82571 firmware may still be configuring the PHY.
 		 * In this case, we cannot access the PHY until the
 		 * configuration is done.  So we explicitly set the
-		 * PHY ID. */
+		 * PHY ID.
+		 */
 		phy->id = IGP01E1000_I_PHY_ID;
 		break;
 	case e1000_82573:
@@ -479,8 +493,10 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
 	if (ret_val)
 		return ret_val;
 
-	/* If our nvm is an EEPROM, then we're done
-	 * otherwise, commit the checksum to the flash NVM. */
+	/*
+	 * If our nvm is an EEPROM, then we're done
+	 * otherwise, commit the checksum to the flash NVM.
+	 */
 	if (hw->nvm.type != e1000_nvm_flash_hw)
 		return ret_val;
 
@@ -496,7 +512,8 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
 
 	/* Reset the firmware if using STM opcode. */
 	if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) {
-		/* The enabling of and the actual reset must be done
+		/*
+		 * The enabling of and the actual reset must be done
 		 * in two write cycles.
 		 */
 		ew32(HICR, E1000_HICR_FW_RESET_ENABLE);
@@ -557,8 +574,10 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
 	u32 eewr = 0;
 	s32 ret_val = 0;
 
-	/* A check for invalid values:  offset too large, too many words,
-	 * and not enough words. */
+	/*
+	 * A check for invalid values:  offset too large, too many words,
+	 * and not enough words.
+	 */
 	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
 	    (words == 0)) {
 		hw_dbg(hw, "nvm parameter(s) out of bounds\n");
@@ -645,30 +664,32 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
 	} else {
 		data &= ~IGP02E1000_PM_D0_LPLU;
 		ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
-		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		/*
+		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
 		 * during Dx states where the power conservation is most
 		 * important.  During driver activity we should enable
-		 * SmartSpeed, so performance is maintained. */
+		 * SmartSpeed, so performance is maintained.
+		 */
 		if (phy->smart_speed == e1000_smart_speed_on) {
 			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
-						     &data);
+					   &data);
 			if (ret_val)
 				return ret_val;
 
 			data |= IGP01E1000_PSCFR_SMART_SPEED;
 			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
-						     data);
+					   data);
 			if (ret_val)
 				return ret_val;
 		} else if (phy->smart_speed == e1000_smart_speed_off) {
 			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
-						     &data);
+					   &data);
 			if (ret_val)
 				return ret_val;
 
 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
 			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
-						     data);
+					   data);
 			if (ret_val)
 				return ret_val;
 		}
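The SmartSpeed branches above are plain read-modify-write cycles on IGP01E1000_PHY_PORT_CONFIG through e1e_rphy()/e1e_wphy(), setting or clearing IGP01E1000_PSCFR_SMART_SPEED and returning on the first error. A generic sketch of that pattern with placeholder accessors and an illustrative bit position — none of the names below are the driver's:

#include <stdint.h>

static uint16_t fake_phy_reg;	/* stands in for one PHY register */

/* placeholder accessors; like e1e_rphy()/e1e_wphy() they return an error
 * code that every caller is expected to check */
static int phy_read(uint16_t *val)  { *val = fake_phy_reg; return 0; }
static int phy_write(uint16_t val)  { fake_phy_reg = val;  return 0; }

#define SMART_SPEED_BIT	(1u << 7)	/* example bit, not authoritative */

static int set_smart_speed(int on)
{
	uint16_t data;
	int ret = phy_read(&data);	/* read the current register contents */

	if (ret)
		return ret;		/* propagate the first failure */
	if (on)
		data |= SMART_SPEED_BIT;
	else
		data &= (uint16_t)~SMART_SPEED_BIT;
	return phy_write(data);		/* write the modified value back */
}

int main(void)
{
	return set_smart_speed(1);
}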
@@ -693,7 +714,8 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 	s32 ret_val;
 	u16 i = 0;
 
-	/* Prevent the PCI-E bus from sticking if there is no TLP connection
+	/*
+	 * Prevent the PCI-E bus from sticking if there is no TLP connection
 	 * on the last TLP read/write transaction when MAC is reset.
 	 */
 	ret_val = e1000e_disable_pcie_master(hw);
@@ -709,8 +731,10 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 
 	msleep(10);
 
-	/* Must acquire the MDIO ownership before MAC reset.
-	 * Ownership defaults to firmware after a reset. */
+	/*
+	 * Must acquire the MDIO ownership before MAC reset.
+	 * Ownership defaults to firmware after a reset.
+	 */
 	if (hw->mac.type == e1000_82573) {
 		extcnf_ctrl = er32(EXTCNF_CTRL);
 		extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
@@ -747,7 +771,8 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 		/* We don't want to continue accessing MAC registers. */
 		return ret_val;
 
-	/* Phy configuration from NVM just starts after EECD_AUTO_RD is set.
+	/*
+	 * Phy configuration from NVM just starts after EECD_AUTO_RD is set.
 	 * Need to wait for Phy configuration completion before accessing
 	 * NVM and Phy.
 	 */
@@ -793,7 +818,8 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
 	e1000e_clear_vfta(hw);
 
 	/* Setup the receive address. */
-	/* If, however, a locally administered address was assigned to the
+	/*
+	 * If, however, a locally administered address was assigned to the
 	 * 82571, we must reserve a RAR for it to work around an issue where
 	 * resetting one port will reload the MAC on the other port.
 	 */
@@ -810,19 +836,19 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
 	ret_val = e1000_setup_link_82571(hw);
 
 	/* Set the transmit descriptor write-back policy */
-	reg_data = er32(TXDCTL);
+	reg_data = er32(TXDCTL(0));
 	reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
 		   E1000_TXDCTL_FULL_TX_DESC_WB |
 		   E1000_TXDCTL_COUNT_DESC;
-	ew32(TXDCTL, reg_data);
+	ew32(TXDCTL(0), reg_data);
 
 	/* ...for both queues. */
 	if (mac->type != e1000_82573) {
-		reg_data = er32(TXDCTL1);
+		reg_data = er32(TXDCTL(1));
 		reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
 			   E1000_TXDCTL_FULL_TX_DESC_WB |
 			   E1000_TXDCTL_COUNT_DESC;
-		ew32(TXDCTL1, reg_data);
+		ew32(TXDCTL(1), reg_data);
 	} else {
 		e1000e_enable_tx_pkt_filtering(hw);
 		reg_data = er32(GCR);
@@ -830,7 +856,8 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
 		ew32(GCR, reg_data);
 	}
 
-	/* Clear all of the statistics registers (clear on read).  It is
+	/*
+	 * Clear all of the statistics registers (clear on read).  It is
 	 * important that we do this after we have tried to establish link
 	 * because the symbol error count will increment wildly if there
 	 * is no link.
@@ -851,17 +878,17 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
 	u32 reg;
 
 	/* Transmit Descriptor Control 0 */
-	reg = er32(TXDCTL);
+	reg = er32(TXDCTL(0));
 	reg |= (1 << 22);
-	ew32(TXDCTL, reg);
+	ew32(TXDCTL(0), reg);
 
 	/* Transmit Descriptor Control 1 */
-	reg = er32(TXDCTL1);
+	reg = er32(TXDCTL(1));
 	reg |= (1 << 22);
-	ew32(TXDCTL1, reg);
+	ew32(TXDCTL(1), reg);
 
 	/* Transmit Arbitration Control 0 */
-	reg = er32(TARC0);
+	reg = er32(TARC(0));
 	reg &= ~(0xF << 27); /* 30:27 */
 	switch (hw->mac.type) {
 	case e1000_82571:
@@ -871,10 +898,10 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
 	default:
 		break;
 	}
-	ew32(TARC0, reg);
+	ew32(TARC(0), reg);
 
 	/* Transmit Arbitration Control 1 */
-	reg = er32(TARC1);
+	reg = er32(TARC(1));
 	switch (hw->mac.type) {
 	case e1000_82571:
 	case e1000_82572:
@@ -884,7 +911,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
 			reg &= ~(1 << 28);
 		else
 			reg |= (1 << 28);
-		ew32(TARC1, reg);
+		ew32(TARC(1), reg);
 		break;
 	default:
 		break;
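The register names TXDCTL/TXDCTL1 and TARC0/TARC1 are replaced by parameterised TXDCTL(n)/TARC(n) lookups, so per-queue registers are computed from a base and a stride instead of being enumerated one by one. The macro below only illustrates the shape — the base address and stride are invented, not taken from the e1000e register map:

/* hypothetical indexed register macro: base + queue * stride */
#define TXDCTL(n)	(0x3828u + (n) * 0x100u)

/* er32(TXDCTL(0)) and er32(TXDCTL(1)) then address consecutive per-queue
 * copies of the same register without needing TXDCTL1, TXDCTL2, ... names */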
@@ -922,7 +949,8 @@ void e1000e_clear_vfta(struct e1000_hw *hw)
 
 	if (hw->mac.type == e1000_82573) {
 		if (hw->mng_cookie.vlan_id != 0) {
-			/* The VFTA is a 4096b bit-field, each identifying
+			/*
+			 * The VFTA is a 4096b bit-field, each identifying
 			 * a single VLAN ID.  The following operations
 			 * determine which 32b entry (i.e. offset) into the
 			 * array we want to set the VLAN ID (i.e. bit) of
@@ -936,7 +964,8 @@ void e1000e_clear_vfta(struct e1000_hw *hw)
 		}
 	}
 	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
-		/* If the offset we want to clear is the same offset of the
+		/*
+		 * If the offset we want to clear is the same offset of the
 		 * manageability VLAN ID, then clear all bits except that of
 		 * the manageability unit.
 		 */
@@ -947,7 +976,7 @@ void e1000e_clear_vfta(struct e1000_hw *hw)
 }
 
 /**
- *  e1000_mc_addr_list_update_82571 - Update Multicast addresses
+ *  e1000_update_mc_addr_list_82571 - Update Multicast addresses
  *  @hw: pointer to the HW structure
  *  @mc_addr_list: array of multicast addresses to program
  *  @mc_addr_count: number of multicast addresses to program
@@ -959,7 +988,7 @@ void e1000e_clear_vfta(struct e1000_hw *hw)
  *  The parameter rar_count will usually be hw->mac.rar_entry_count
  *  unless there are workarounds that change this.
  **/
-static void e1000_mc_addr_list_update_82571(struct e1000_hw *hw,
+static void e1000_update_mc_addr_list_82571(struct e1000_hw *hw,
 					    u8 *mc_addr_list,
 					    u32 mc_addr_count,
 					    u32 rar_used_count,
@@ -968,8 +997,8 @@ static void e1000_mc_addr_list_update_82571(struct e1000_hw *hw,
 	if (e1000e_get_laa_state_82571(hw))
 		rar_count--;
 
-	e1000e_mc_addr_list_update_generic(hw, mc_addr_list, mc_addr_count,
-					  rar_used_count, rar_count);
+	e1000e_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count,
+					   rar_used_count, rar_count);
 }
 
 /**
@@ -984,12 +1013,13 @@ static void e1000_mc_addr_list_update_82571(struct e1000_hw *hw,
  **/
 static s32 e1000_setup_link_82571(struct e1000_hw *hw)
 {
-	/* 82573 does not have a word in the NVM to determine
+	/*
+	 * 82573 does not have a word in the NVM to determine
 	 * the default flow control setting, so we explicitly
 	 * set it to full.
 	 */
 	if (hw->mac.type == e1000_82573)
-		hw->mac.fc = e1000_fc_full;
+		hw->fc.type = e1000_fc_full;
 
 	return e1000e_setup_link(hw);
 }
@@ -1050,14 +1080,14 @@ static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
 	switch (hw->mac.type) {
 	case e1000_82571:
 	case e1000_82572:
-		/* If SerDes loopback mode is entered, there is no form
+		/*
+		 * If SerDes loopback mode is entered, there is no form
 		 * of reset to take the adapter out of that mode.  So we
 		 * have to explicitly take the adapter out of loopback
 		 * mode.  This prevents drivers from twiddling their thumbs
 		 * if another tool failed to take it out of loopback mode.
 		 */
-		ew32(SCTL,
-				E1000_SCTL_DISABLE_SERDES_LOOPBACK);
+		ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
 		break;
 	default:
 		break;
@@ -1124,7 +1154,8 @@ void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state)
 
 	/* If workaround is activated... */
 	if (state)
-		/* Hold a copy of the LAA in RAR[14] This is done so that
+		/*
+		 * Hold a copy of the LAA in RAR[14] This is done so that
 		 * between the time RAR[0] gets clobbered and the time it
 		 * gets fixed, the actual LAA is in one of the RARs and no
 		 * incoming packets directed to this port are dropped.
@@ -1152,7 +1183,8 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
 	if (nvm->type != e1000_nvm_flash_hw)
 		return 0;
 
-	/* Check bit 4 of word 10h.  If it is 0, firmware is done updating
+	/*
+	 * Check bit 4 of word 10h.  If it is 0, firmware is done updating
 	 * 10h-12h.  Checksum may need to be fixed.
 	 */
 	ret_val = e1000_read_nvm(hw, 0x10, 1, &data);
@@ -1160,7 +1192,8 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
 		return ret_val;
 
 	if (!(data & 0x10)) {
-		/* Read 0x23 and check bit 15.  This bit is a 1
+		/*
+		 * Read 0x23 and check bit 15.  This bit is a 1
 		 * when the checksum has already been fixed.  If
 		 * the checksum is still wrong and this bit is a
 		 * 1, we need to return bad checksum.  Otherwise,
@@ -1240,7 +1273,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
 	/* .get_link_up_info: media type dependent */
 	.led_on			= e1000e_led_on_generic,
 	.led_off		= e1000e_led_off_generic,
-	.mc_addr_list_update	= e1000_mc_addr_list_update_82571,
+	.update_mc_addr_list	= e1000_update_mc_addr_list_82571,
 	.reset_hw		= e1000_reset_hw_82571,
 	.init_hw		= e1000_init_hw_82571,
 	.setup_link		= e1000_setup_link_82571,
@@ -1304,7 +1337,7 @@ struct e1000_info e1000_82571_info = {
 				  | FLAG_TARC_SPEED_MODE_BIT /* errata */
 				  | FLAG_APME_CHECK_PORT_B,
 	.pba			= 38,
-	.get_invariants		= e1000_get_invariants_82571,
+	.get_variants		= e1000_get_variants_82571,
 	.mac_ops		= &e82571_mac_ops,
 	.phy_ops		= &e82_phy_ops_igp,
 	.nvm_ops		= &e82571_nvm_ops,
@@ -1322,7 +1355,7 @@ struct e1000_info e1000_82572_info = {
 				  | FLAG_HAS_STATS_ICR_ICT
 				  | FLAG_TARC_SPEED_MODE_BIT, /* errata */
 	.pba			= 38,
-	.get_invariants		= e1000_get_invariants_82571,
+	.get_variants		= e1000_get_variants_82571,
 	.mac_ops		= &e82571_mac_ops,
 	.phy_ops		= &e82_phy_ops_igp,
 	.nvm_ops		= &e82571_nvm_ops,
@@ -1342,7 +1375,7 @@ struct e1000_info e1000_82573_info = {
 				  | FLAG_HAS_ERT
 				  | FLAG_HAS_SWSM_ON_LOAD,
 	.pba			= 20,
-	.get_invariants		= e1000_get_invariants_82571,
+	.get_variants		= e1000_get_variants_82571,
 	.mac_ops		= &e82571_mac_ops,
 	.phy_ops		= &e82_phy_ops_m88,
 	.nvm_ops		= &e82571_nvm_ops,

+ 1 - 1
drivers/net/e1000e/Makefile

@@ -1,7 +1,7 @@
 ################################################################################
 #
 # Intel PRO/1000 Linux driver
-# Copyright(c) 1999 - 2007 Intel Corporation.
+# Copyright(c) 1999 - 2008 Intel Corporation.
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,

+ 58 - 51
drivers/net/e1000e/defines.h

@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -120,10 +120,10 @@
 #define E1000_MANC_ARP_EN        0x00002000 /* Enable ARP Request Filtering */
 #define E1000_MANC_RCV_TCO_EN    0x00020000 /* Receive TCO Packets Enabled */
 #define E1000_MANC_BLK_PHY_RST_ON_IDE   0x00040000 /* Block phy resets */
-#define E1000_MANC_EN_MAC_ADDR_FILTER   0x00100000 /* Enable MAC address
-						    * filtering */
-#define E1000_MANC_EN_MNG2HOST   0x00200000 /* Enable MNG packets to host
-					     * memory */
+/* Enable MAC address filtering */
+#define E1000_MANC_EN_MAC_ADDR_FILTER   0x00100000
+/* Enable MNG packets to host memory */
+#define E1000_MANC_EN_MNG2HOST   0x00200000
 
 /* Receive Control */
 #define E1000_RCTL_EN             0x00000002    /* enable */
@@ -135,25 +135,26 @@
 #define E1000_RCTL_LBM_MAC        0x00000040    /* MAC loopback mode */
 #define E1000_RCTL_LBM_TCVR       0x000000C0    /* tcvr loopback mode */
 #define E1000_RCTL_DTYP_PS        0x00000400    /* Packet Split descriptor */
-#define E1000_RCTL_RDMTS_HALF     0x00000000    /* rx desc min threshold size */
+#define E1000_RCTL_RDMTS_HALF     0x00000000    /* Rx desc min threshold size */
 #define E1000_RCTL_MO_SHIFT       12            /* multicast offset shift */
 #define E1000_RCTL_BAM            0x00008000    /* broadcast enable */
 /* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
-#define E1000_RCTL_SZ_2048        0x00000000    /* rx buffer size 2048 */
-#define E1000_RCTL_SZ_1024        0x00010000    /* rx buffer size 1024 */
-#define E1000_RCTL_SZ_512         0x00020000    /* rx buffer size 512 */
-#define E1000_RCTL_SZ_256         0x00030000    /* rx buffer size 256 */
+#define E1000_RCTL_SZ_2048        0x00000000    /* Rx buffer size 2048 */
+#define E1000_RCTL_SZ_1024        0x00010000    /* Rx buffer size 1024 */
+#define E1000_RCTL_SZ_512         0x00020000    /* Rx buffer size 512 */
+#define E1000_RCTL_SZ_256         0x00030000    /* Rx buffer size 256 */
 /* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
-#define E1000_RCTL_SZ_16384       0x00010000    /* rx buffer size 16384 */
-#define E1000_RCTL_SZ_8192        0x00020000    /* rx buffer size 8192 */
-#define E1000_RCTL_SZ_4096        0x00030000    /* rx buffer size 4096 */
+#define E1000_RCTL_SZ_16384       0x00010000    /* Rx buffer size 16384 */
+#define E1000_RCTL_SZ_8192        0x00020000    /* Rx buffer size 8192 */
+#define E1000_RCTL_SZ_4096        0x00030000    /* Rx buffer size 4096 */
 #define E1000_RCTL_VFE            0x00040000    /* vlan filter enable */
 #define E1000_RCTL_CFIEN          0x00080000    /* canonical form enable */
 #define E1000_RCTL_CFI            0x00100000    /* canonical form indicator */
 #define E1000_RCTL_BSEX           0x02000000    /* Buffer size extension */
 #define E1000_RCTL_SECRC          0x04000000    /* Strip Ethernet CRC */
 
-/* Use byte values for the following shift parameters
+/*
+ * Use byte values for the following shift parameters
  * Usage:
  *     psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
  *                  E1000_PSRCTL_BSIZE0_MASK) |
@@ -206,7 +207,8 @@
 #define E1000_CTRL_VME      0x40000000  /* IEEE VLAN mode enable */
 #define E1000_CTRL_PHY_RST  0x80000000  /* PHY Reset */
 
-/* Bit definitions for the Management Data IO (MDIO) and Management Data
+/*
+ * Bit definitions for the Management Data IO (MDIO) and Management Data
  * Clock (MDC) pins in the Device Control Register.
  */
 
@@ -279,7 +281,7 @@
 #define E1000_TXD_STAT_TC    0x00000004 /* Tx Underrun */
 
 /* Transmit Control */
-#define E1000_TCTL_EN     0x00000002    /* enable tx */
+#define E1000_TCTL_EN     0x00000002    /* enable Tx */
 #define E1000_TCTL_PSP    0x00000008    /* pad short packets */
 #define E1000_TCTL_CT     0x00000ff0    /* collision threshold */
 #define E1000_TCTL_COLD   0x003ff000    /* collision distance */
@@ -337,8 +339,8 @@
 #define E1000_KABGTXD_BGSQLBIAS           0x00050000
 
 /* PBA constants */
-#define E1000_PBA_8K  0x0008    /* 8KB, default Rx allocation */
-#define E1000_PBA_16K 0x0010    /* 16KB, default TX allocation */
+#define E1000_PBA_8K  0x0008    /* 8KB */
+#define E1000_PBA_16K 0x0010    /* 16KB */
 
 #define E1000_PBS_16K E1000_PBA_16K
 
@@ -356,12 +358,13 @@
 /* Interrupt Cause Read */
 #define E1000_ICR_TXDW          0x00000001 /* Transmit desc written back */
 #define E1000_ICR_LSC           0x00000004 /* Link Status Change */
-#define E1000_ICR_RXSEQ         0x00000008 /* rx sequence error */
-#define E1000_ICR_RXDMT0        0x00000010 /* rx desc min. threshold (0) */
-#define E1000_ICR_RXT0          0x00000080 /* rx timer intr (ring 0) */
+#define E1000_ICR_RXSEQ         0x00000008 /* Rx sequence error */
+#define E1000_ICR_RXDMT0        0x00000010 /* Rx desc min. threshold (0) */
+#define E1000_ICR_RXT0          0x00000080 /* Rx timer intr (ring 0) */
 #define E1000_ICR_INT_ASSERTED  0x80000000 /* If this bit asserted, the driver should claim the interrupt */
 
-/* This defines the bits that are set in the Interrupt Mask
+/*
+ * This defines the bits that are set in the Interrupt Mask
  * Set/Read Register.  Each bit is documented below:
  *   o RXT0   = Receiver Timer Interrupt (ring 0)
  *   o TXDW   = Transmit Descriptor Written Back
@@ -379,21 +382,22 @@
 /* Interrupt Mask Set */
 #define E1000_IMS_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
 #define E1000_IMS_LSC       E1000_ICR_LSC       /* Link Status Change */
-#define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* rx sequence error */
-#define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
-#define E1000_IMS_RXT0      E1000_ICR_RXT0      /* rx timer intr */
+#define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* Rx sequence error */
+#define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* Rx desc min. threshold */
+#define E1000_IMS_RXT0      E1000_ICR_RXT0      /* Rx timer intr */
 
 /* Interrupt Cause Set */
 #define E1000_ICS_LSC       E1000_ICR_LSC       /* Link Status Change */
-#define E1000_ICS_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
+#define E1000_ICS_RXDMT0    E1000_ICR_RXDMT0    /* Rx desc min. threshold */
 
 /* Transmit Descriptor Control */
 #define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
 #define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
 #define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
 #define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
-#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc.
-					      still to be processed. */
+/* Enable the counting of desc. still to be processed. */
+#define E1000_TXDCTL_COUNT_DESC 0x00400000
 
 /* Flow Control Constants */
 #define FLOW_CONTROL_ADDRESS_LOW  0x00C28001
@@ -404,7 +408,8 @@
 #define E1000_VLAN_FILTER_TBL_SIZE 128  /* VLAN Filter Table (4096 bits) */
 
 /* Receive Address */
-/* Number of high/low register pairs in the RAR. The RAR (Receive Address
+/*
+ * Number of high/low register pairs in the RAR. The RAR (Receive Address
  * Registers) holds the directed and multicast addresses that we monitor.
  * Technically, we have 16 spots.  However, we reserve one of these spots
  * (RAR[15]) for our directed address used by controllers with
@@ -533,8 +538,8 @@
 #define E1000_EECD_REQ       0x00000040 /* NVM Access Request */
 #define E1000_EECD_GNT       0x00000080 /* NVM Access Grant */
 #define E1000_EECD_SIZE      0x00000200 /* NVM Size (0=64 word 1=256 word) */
-#define E1000_EECD_ADDR_BITS 0x00000400 /* NVM Addressing bits based on type
-					 * (0-small, 1-large) */
+/* NVM Addressing bits based on type (0-small, 1-large) */
+#define E1000_EECD_ADDR_BITS 0x00000400
 #define E1000_NVM_GRANT_ATTEMPTS   1000 /* NVM # attempts to gain grant */
 #define E1000_EECD_AUTO_RD          0x00000200  /* NVM Auto Read done */
 #define E1000_EECD_SIZE_EX_MASK     0x00007800  /* NVM Size */
@@ -626,7 +631,8 @@
 #define MAX_PHY_MULTI_PAGE_REG 0xF
 
 /* Bit definitions for valid PHY IDs. */
-/* I = Integrated
+/*
+ * I = Integrated
  * E = External
  */
 #define M88E1000_E_PHY_ID    0x01410C50
@@ -653,37 +659,37 @@
 #define M88E1000_PSCR_MDI_MANUAL_MODE  0x0000  /* MDI Crossover Mode bits 6:5 */
 					       /* Manual MDI configuration */
 #define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020  /* Manual MDIX configuration */
-#define M88E1000_PSCR_AUTO_X_1000T     0x0040  /* 1000BASE-T: Auto crossover,
-						*  100BASE-TX/10BASE-T:
-						*  MDI Mode
-						*/
-#define M88E1000_PSCR_AUTO_X_MODE      0x0060  /* Auto crossover enabled
-						* all speeds.
-						*/
-					/* 1=Enable Extended 10BASE-T distance
-					 * (Lower 10BASE-T RX Threshold)
-					 * 0=Normal 10BASE-T RX Threshold */
-					/* 1=5-Bit interface in 100BASE-TX
-					 * 0=MII interface in 100BASE-TX */
-#define M88E1000_PSCR_ASSERT_CRS_ON_TX     0x0800 /* 1=Assert CRS on Transmit */
+/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
+#define M88E1000_PSCR_AUTO_X_1000T     0x0040
+/* Auto crossover enabled all speeds */
+#define M88E1000_PSCR_AUTO_X_MODE      0x0060
+/*
+ * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold)
+ * 0=Normal 10BASE-T Rx Threshold
+ */
+#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
 
 /* M88E1000 PHY Specific Status Register */
 #define M88E1000_PSSR_REV_POLARITY       0x0002 /* 1=Polarity reversed */
 #define M88E1000_PSSR_DOWNSHIFT          0x0020 /* 1=Downshifted */
 #define M88E1000_PSSR_MDIX               0x0040 /* 1=MDIX; 0=MDI */
-#define M88E1000_PSSR_CABLE_LENGTH       0x0380 /* 0=<50M;1=50-80M;2=80-110M;
-					    * 3=110-140M;4=>140M */
+/* 0=<50M; 1=50-80M; 2=80-110M; 3=110-140M; 4=>140M */
+#define M88E1000_PSSR_CABLE_LENGTH       0x0380
 #define M88E1000_PSSR_SPEED              0xC000 /* Speed, bits 14:15 */
 #define M88E1000_PSSR_1000MBS            0x8000 /* 10=1000Mbs */
 
 #define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
 
-/* Number of times we will attempt to autonegotiate before downshifting if we
- * are the master */
+/*
+ * Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master
+ */
 #define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
 #define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X   0x0000
-/* Number of times we will attempt to autonegotiate before downshifting if we
- * are the slave */
+/*
+ * Number of times we will attempt to autonegotiate before downshifting if we
+ * are the slave
+ */
 #define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK  0x0300
 #define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X    0x0100
 #define M88E1000_EPSCR_TX_CLK_25      0x0070 /* 25  MHz TX_CLK */
@@ -692,7 +698,8 @@
 #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK  0x0E00
 #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X    0x0800
 
-/* Bits...
+/*
+ * Bits...
  * 15-5: page
  * 4-0: register offset
  */

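The defines.h changes above are cosmetic: trailing block comments move onto their own line above the #define, multi-line comments gain a bare /* opener, and rx/tx is normalized to Rx/Tx; the mask values themselves are untouched in the hunks shown. As a reminder of how these single-bit masks are used, here is a minimal userspace sketch (not part of the patch); the three values are copied from the hunks above, while rctl_describe() is a hypothetical helper written only for illustration.

/*
 * Sketch only: combine and test E1000 RCTL bit masks in userspace.
 * Mask values copied from the defines.h hunks; rctl_describe() is
 * hypothetical and exists only for this example.
 */
#include <stdio.h>

#define E1000_RCTL_EN    0x00000002 /* enable */
#define E1000_RCTL_BAM   0x00008000 /* broadcast enable */
#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */

static void rctl_describe(unsigned int rctl)
{
	/* Each feature is a single bit, so a bitwise AND tests it. */
	printf("Rx enabled: %s\n", (rctl & E1000_RCTL_EN) ? "yes" : "no");
	printf("broadcast:  %s\n", (rctl & E1000_RCTL_BAM) ? "yes" : "no");
	printf("strip CRC:  %s\n", (rctl & E1000_RCTL_SECRC) ? "yes" : "no");
}

int main(void)
{
	unsigned int rctl = 0;

	/* Build a register image the same way the driver ORs flags together. */
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SECRC;
	rctl_describe(rctl);
	return 0;
}
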
+ 19 - 18
drivers/net/e1000e/e1000.h

@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -61,7 +61,7 @@ struct e1000_info;
 	ndev_printk(KERN_NOTICE , netdev, format, ## arg)
 
 
-/* TX/RX descriptor defines */
+/* Tx/Rx descriptor defines */
 #define E1000_DEFAULT_TXD		256
 #define E1000_MAX_TXD			4096
 #define E1000_MIN_TXD			80
@@ -114,13 +114,13 @@ struct e1000_buffer {
 	dma_addr_t dma;
 	struct sk_buff *skb;
 	union {
-		/* TX */
+		/* Tx */
 		struct {
 			unsigned long time_stamp;
 			u16 length;
 			u16 next_to_watch;
 		};
-		/* RX */
+		/* Rx */
 		/* arrays of page information for packet split */
 		struct e1000_ps_page *ps_pages;
 	};
@@ -167,9 +167,6 @@ struct e1000_adapter {
 
 	spinlock_t tx_queue_lock; /* prevent concurrent tail updates */
 
-	/* this is still needed for 82571 and above */
-	atomic_t irq_sem;
-
 	/* track device up/down/testing state */
 	unsigned long state;
 
@@ -180,7 +177,7 @@ struct e1000_adapter {
 	u16 rx_itr;
 
 	/*
-	 * TX
+	 * Tx
 	 */
 	struct e1000_ring *tx_ring /* One per active queue */
 						____cacheline_aligned_in_smp;
@@ -202,7 +199,7 @@ struct e1000_adapter {
 	unsigned int total_rx_bytes;
 	unsigned int total_rx_packets;
 
-	/* TX stats */
+	/* Tx stats */
 	u64 tpt_old;
 	u64 colc_old;
 	u64 gotcl_old;
@@ -214,7 +211,7 @@ struct e1000_adapter {
 	u32 tx_dma_failed;
 
 	/*
-	 * RX
+	 * Rx
 	 */
 	bool (*clean_rx) (struct e1000_adapter *adapter,
 			  int *work_done, int work_to_do)
@@ -226,7 +223,7 @@ struct e1000_adapter {
 	u32 rx_int_delay;
 	u32 rx_abs_int_delay;
 
-	/* RX stats */
+	/* Rx stats */
 	u64 hw_csum_err;
 	u64 hw_csum_good;
 	u64 rx_hdr_split;
@@ -237,6 +234,8 @@ struct e1000_adapter {
 
 	unsigned int rx_ps_pages;
 	u16 rx_ps_bsize0;
+	u32 max_frame_size;
+	u32 min_frame_size;
 
 	/* OS defined structs */
 	struct net_device *netdev;
@@ -261,7 +260,7 @@ struct e1000_adapter {
 	u32 wol;
 	u32 pba;
 
-	u8 fc_autoneg;
+	bool fc_autoneg;
 
 	unsigned long led_status;
 
@@ -272,7 +271,7 @@ struct e1000_info {
 	enum e1000_mac_type	mac;
 	unsigned int		flags;
 	u32			pba;
-	s32			(*get_invariants)(struct e1000_adapter *);
+	s32			(*get_variants)(struct e1000_adapter *);
 	struct e1000_mac_operations *mac_ops;
 	struct e1000_phy_operations *phy_ops;
 	struct e1000_nvm_operations *nvm_ops;
@@ -308,6 +307,7 @@ struct e1000_info {
 #define FLAG_MSI_ENABLED                  (1 << 27)
 #define FLAG_RX_CSUM_ENABLED              (1 << 28)
 #define FLAG_TSO_FORCE                    (1 << 29)
+#define FLAG_RX_RESTART_NOW               (1 << 30)
 
 #define E1000_RX_DESC_PS(R, i)	    \
 	(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -357,7 +357,7 @@ extern struct e1000_info e1000_ich8_info;
 extern struct e1000_info e1000_ich9_info;
 extern struct e1000_info e1000_es2_info;
 
-extern s32 e1000e_read_part_num(struct e1000_hw *hw, u32 *part_num);
+extern s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num);
 
 extern s32  e1000e_commit_phy(struct e1000_hw *hw);
 
@@ -390,9 +390,11 @@ extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw);
 extern s32 e1000e_setup_link(struct e1000_hw *hw);
 extern void e1000e_clear_vfta(struct e1000_hw *hw);
 extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
-extern void e1000e_mc_addr_list_update_generic(struct e1000_hw *hw,
-				       u8 *mc_addr_list, u32 mc_addr_count,
-				       u32 rar_used_count, u32 rar_count);
+extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
+					       u8 *mc_addr_list,
+					       u32 mc_addr_count,
+					       u32 rar_used_count,
+					       u32 rar_count);
 extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
 extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
 extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
@@ -462,7 +464,6 @@ extern s32 e1000e_acquire_nvm(struct e1000_hw *hw);
 extern s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
 extern s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw);
 extern s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
-extern s32 e1000e_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
 extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
 extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
 extern void e1000e_release_nvm(struct e1000_hw *hw);

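Besides the Tx/Rx comment cleanup, e1000.h grows FLAG_RX_RESTART_NOW at bit 30 of the adapter flags word and renames get_invariants to get_variants. A minimal sketch (not from the patch) of the flag-bit convention follows; the two flag values are copied from the header, everything else is illustrative.

/*
 * Sketch only: the e1000e "flags" convention, where each driver
 * capability or pending action is one bit in an unsigned int.
 * Flag values copied from e1000.h; the flow is invented for the example.
 */
#include <stdio.h>

#define FLAG_RX_CSUM_ENABLED (1u << 28)
#define FLAG_RX_RESTART_NOW  (1u << 30)

int main(void)
{
	unsigned int flags = FLAG_RX_CSUM_ENABLED;

	/* Request a receiver restart, as an interrupt path might. */
	flags |= FLAG_RX_RESTART_NOW;

	if (flags & FLAG_RX_RESTART_NOW)
		printf("Rx restart requested (flags=0x%08x)\n", flags);

	/* Clear the request once it has been serviced. */
	flags &= ~FLAG_RX_RESTART_NOW;
	printf("after service: flags=0x%08x\n", flags);
	return 0;
}
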
+ 78 - 59
drivers/net/e1000e/es2lan.c

@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -92,7 +92,8 @@
 /* In-Band Control Register (Page 194, Register 18) */
 #define GG82563_ICR_DIS_PADDING			 0x0010 /* Disable Padding */
 
-/* A table for the GG82563 cable length where the range is defined
+/*
+ * A table for the GG82563 cable length where the range is defined
  * with a lower bound at "index" and the upper bound at
  * "index + 5".
  */
@@ -118,7 +119,7 @@ static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw)
 	struct e1000_phy_info *phy = &hw->phy;
 	s32 ret_val;
 
-	if (hw->media_type != e1000_media_type_copper) {
+	if (hw->phy.media_type != e1000_media_type_copper) {
 		phy->type	= e1000_phy_none;
 		return 0;
 	}
@@ -167,15 +168,20 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
 		break;
 	}
 
-	nvm->type	       = e1000_nvm_eeprom_spi;
+	nvm->type = e1000_nvm_eeprom_spi;
 
 	size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
 			  E1000_EECD_SIZE_EX_SHIFT);
 
-	/* Added to a constant, "size" becomes the left-shift value
+	/*
+	 * Added to a constant, "size" becomes the left-shift value
 	 * for setting word_size.
 	 */
 	size += NVM_WORD_SIZE_BASE_SHIFT;
+
+	/* EEPROM access above 16k is unsupported */
+	if (size > 14)
+		size = 14;
 	nvm->word_size	= 1 << size;
 
 	return 0;
@@ -196,10 +202,10 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
 	/* Set media type */
 	switch (adapter->pdev->device) {
 	case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
-		hw->media_type = e1000_media_type_internal_serdes;
+		hw->phy.media_type = e1000_media_type_internal_serdes;
 		break;
 	default:
-		hw->media_type = e1000_media_type_copper;
+		hw->phy.media_type = e1000_media_type_copper;
 		break;
 	}
 
@@ -208,11 +214,10 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
 	/* Set rar entry count */
 	mac->rar_entry_count = E1000_RAR_ENTRIES;
 	/* Set if manageability features are enabled. */
-	mac->arc_subsystem_valid =
-		(er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0;
+	mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0;
 
 	/* check for link */
-	switch (hw->media_type) {
+	switch (hw->phy.media_type) {
 	case e1000_media_type_copper:
 		func->setup_physical_interface = e1000_setup_copper_link_80003es2lan;
 		func->check_for_link = e1000e_check_for_copper_link;
@@ -233,7 +238,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
 	return 0;
 }
 
-static s32 e1000_get_invariants_80003es2lan(struct e1000_adapter *adapter)
+static s32 e1000_get_variants_80003es2lan(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	s32 rc;
@@ -344,8 +349,10 @@ static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
 		if (!(swfw_sync & (fwmask | swmask)))
 			break;
 
-		/* Firmware currently using resource (fwmask)
-		 * or other software thread using resource (swmask) */
+		/*
+		 * Firmware currently using resource (fwmask)
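ethtool.c also converts the kmalloc() + memset() pairs in e1000_setup_desc_rings() to kcalloc(), which zeroes the allocation and checks the count * size multiplication for overflow. A userspace analogue of the same idea (not from the patch, using libc calloc() rather than the kernel API) is shown below; the buffer_info layout is illustrative only.

/*
 * Sketch only: userspace analogue of the kmalloc+memset -> kcalloc
 * conversion.  calloc() zeroes the buffer and rejects an overflowing
 * count * size, which is why the zeroing allocator is preferred.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buffer_info {
	unsigned long time_stamp;
	unsigned short length;
};

int main(void)
{
	size_t count = 256;	/* e.g. E1000_DEFAULT_TXD */
	struct buffer_info *old_way, *new_way;

	/* Old pattern: allocate, then clear by hand. */
	old_way = malloc(count * sizeof(*old_way));
	if (old_way)
		memset(old_way, 0, count * sizeof(*old_way));

	/* New pattern: one call, zeroed, overflow-checked multiply. */
	new_way = calloc(count, sizeof(*new_way));

	printf("old=%p new=%p\n", (void *)old_way, (void *)new_way);
	free(old_way);
	free(new_way);
	return 0;
}
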
+		 * or other software thread using resource (swmask)
+		 */
 		e1000e_put_hw_semaphore(hw);
 		mdelay(5);
 		i++;
@@ -407,7 +414,8 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
 	if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG)
 		page_select = GG82563_PHY_PAGE_SELECT;
 	else
-		/* Use Alternative Page Select register to access
+		/*
+		 * Use Alternative Page Select register to access
 		 * registers 30 and 31
 		 */
 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
@@ -417,7 +425,8 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
 	if (ret_val)
 		return ret_val;
 
-	/* The "ready" bit in the MDIC register may be incorrectly set
+	/*
+	 * The "ready" bit in the MDIC register may be incorrectly set
 	 * before the device has completed the "Page Select" MDI
 	 * transaction.  So we wait 200us after each MDI command...
 	 */
@@ -462,7 +471,8 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
 	if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG)
 		page_select = GG82563_PHY_PAGE_SELECT;
 	else
-		/* Use Alternative Page Select register to access
+		/*
+		 * Use Alternative Page Select register to access
 		 * registers 30 and 31
 		 */
 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
@@ -473,7 +483,8 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
 		return ret_val;
 
 
-	/* The "ready" bit in the MDIC register may be incorrectly set
+	/*
+	 * The "ready" bit in the MDIC register may be incorrectly set
 	 * before the device has completed the "Page Select" MDI
 	 * transaction.  So we wait 200us after each MDI command...
 	 */
@@ -554,7 +565,8 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
 	u16 phy_data;
 	bool link;
 
-	/* Clear Auto-Crossover to force MDI manually.  M88E1000 requires MDI
+	/*
+	 * Clear Auto-Crossover to force MDI manually.  M88E1000 requires MDI
 	 * forced whenever speed and duplex are forced.
 	 */
 	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -583,7 +595,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
 
 	udelay(1);
 
-	if (hw->phy.wait_for_link) {
+	if (hw->phy.autoneg_wait_to_complete) {
 		hw_dbg(hw, "Waiting for forced speed/duplex link "
 			 "on GG82563 phy.\n");
 
@@ -593,7 +605,8 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
 			return ret_val;
 
 		if (!link) {
-			/* We didn't get link.
+			/*
+			 * We didn't get link.
 			 * Reset the DSP and cross our fingers.
 			 */
 			ret_val = e1000e_phy_reset_dsp(hw);
@@ -612,7 +625,8 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
 	if (ret_val)
 		return ret_val;
 
-	/* Resetting the phy means we need to verify the TX_CLK corresponds
+	/*
+	 * Resetting the phy means we need to verify the TX_CLK corresponds
 	 * to the link speed.  10Mbps -> 2.5MHz, else 25MHz.
 	 */
 	phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
@@ -621,7 +635,8 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
 	else
 		phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25;
 
-	/* In addition, we must re-enable CRS on Tx for both half and full
+	/*
+	 * In addition, we must re-enable CRS on Tx for both half and full
 	 * duplex.
 	 */
 	phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
@@ -671,7 +686,7 @@ static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
 {
 	s32 ret_val;
 
-	if (hw->media_type == e1000_media_type_copper) {
+	if (hw->phy.media_type == e1000_media_type_copper) {
 		ret_val = e1000e_get_speed_and_duplex_copper(hw,
 								    speed,
 								    duplex);
@@ -704,7 +719,8 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
 	u32 icr;
 	s32 ret_val;
 
-	/* Prevent the PCI-E bus from sticking if there is no TLP connection
+	/*
+	 * Prevent the PCI-E bus from sticking if there is no TLP connection
 	 * on the last TLP read/write transaction when MAC is reset.
 	 */
 	ret_val = e1000e_disable_pcie_master(hw);
@@ -776,16 +792,16 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
 	ret_val = e1000e_setup_link(hw);
 
 	/* Set the transmit descriptor write-back policy */
-	reg_data = er32(TXDCTL);
+	reg_data = er32(TXDCTL(0));
 	reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
 		   E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
-	ew32(TXDCTL, reg_data);
+	ew32(TXDCTL(0), reg_data);
 
 	/* ...for both queues. */
-	reg_data = er32(TXDCTL1);
+	reg_data = er32(TXDCTL(1));
 	reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
 		   E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
-	ew32(TXDCTL1, reg_data);
+	ew32(TXDCTL(1), reg_data);
 
 	/* Enable retransmit on late collisions */
 	reg_data = er32(TCTL);
@@ -808,7 +824,8 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
 	reg_data &= ~0x00100000;
 	E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data);
 
-	/* Clear all of the statistics registers (clear on read).  It is
+	/*
+	 * Clear all of the statistics registers (clear on read).  It is
 	 * important that we do this after we have tried to establish link
 	 * because the symbol error count will increment wildly if there
 	 * is no link.
@@ -829,29 +846,29 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
 	u32 reg;
 
 	/* Transmit Descriptor Control 0 */
-	reg = er32(TXDCTL);
+	reg = er32(TXDCTL(0));
 	reg |= (1 << 22);
-	ew32(TXDCTL, reg);
+	ew32(TXDCTL(0), reg);
 
 	/* Transmit Descriptor Control 1 */
-	reg = er32(TXDCTL1);
+	reg = er32(TXDCTL(1));
 	reg |= (1 << 22);
-	ew32(TXDCTL1, reg);
+	ew32(TXDCTL(1), reg);
 
 	/* Transmit Arbitration Control 0 */
-	reg = er32(TARC0);
+	reg = er32(TARC(0));
 	reg &= ~(0xF << 27); /* 30:27 */
-	if (hw->media_type != e1000_media_type_copper)
+	if (hw->phy.media_type != e1000_media_type_copper)
 		reg &= ~(1 << 20);
-	ew32(TARC0, reg);
+	ew32(TARC(0), reg);
 
 	/* Transmit Arbitration Control 1 */
-	reg = er32(TARC1);
+	reg = er32(TARC(1));
 	if (er32(TCTL) & E1000_TCTL_MULR)
 		reg &= ~(1 << 28);
 	else
 		reg |= (1 << 28);
-	ew32(TARC1, reg);
+	ew32(TARC(1), reg);
 }
 
 /**
@@ -881,7 +898,8 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
 	if (ret_val)
 		return ret_val;
 
-	/* Options:
+	/*
+	 * Options:
 	 *   MDI/MDI-X = 0 (default)
 	 *   0 - Auto for all speeds
 	 *   1 - MDI mode
@@ -907,7 +925,8 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
 		break;
 	}
 
-	/* Options:
+	/*
+	 * Options:
 	 *   disable_polarity_correction = 0 (default)
 	 *       Automatic Correction for Reversed Cable Polarity
 	 *   0 - Disabled
@@ -928,10 +947,9 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
 		return ret_val;
 	}
 
-	/* Bypass RX and TX FIFO's */
-	ret_val = e1000e_write_kmrn_reg(hw,
-				E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL,
-				E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
+	/* Bypass Rx and Tx FIFO's */
+	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL,
+					E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
 					E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
 	if (ret_val)
 		return ret_val;
@@ -953,7 +971,8 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
 	if (ret_val)
 		return ret_val;
 
-	/* Do not init these registers when the HW is in IAMT mode, since the
+	/*
+	 * Do not init these registers when the HW is in IAMT mode, since the
 	 * firmware will have already initialized them.  We only initialize
 	 * them if the HW is not in IAMT mode.
 	 */
@@ -974,7 +993,8 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
 			return ret_val;
 	}
 
-	/* Workaround: Disable padding in Kumeran interface in the MAC
+	/*
+	 * Workaround: Disable padding in Kumeran interface in the MAC
 	 * and in the PHY to avoid CRC errors.
 	 */
 	ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data);
@@ -1007,9 +1027,11 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
 	ew32(CTRL, ctrl);
 
-	/* Set the mac to wait the maximum time between each
+	/*
+	 * Set the mac to wait the maximum time between each
 	 * iteration and increase the max iterations when
-	 * polling the phy; this fixes erroneous timeouts at 10Mbps. */
+	 * polling the phy; this fixes erroneous timeouts at 10Mbps.
+	 */
 	ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF);
 	if (ret_val)
 		return ret_val;
@@ -1026,9 +1048,8 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
 	if (ret_val)
 		return ret_val;
 	reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
-	ret_val = e1000e_write_kmrn_reg(hw,
-				       E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
-				       reg_data);
+	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+					reg_data);
 	if (ret_val)
 		return ret_val;
 
@@ -1056,9 +1077,8 @@ static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
 	u16 reg_data;
 
 	reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
-	ret_val = e1000e_write_kmrn_reg(hw,
-				       E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
-				       reg_data);
+	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+					reg_data);
 	if (ret_val)
 		return ret_val;
 
@@ -1096,9 +1116,8 @@ static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
 	u32 tipg;
 
 	reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
-	ret_val = e1000e_write_kmrn_reg(hw,
-				       E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
-				       reg_data);
+	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+					reg_data);
 	if (ret_val)
 		return ret_val;
 
@@ -1175,7 +1194,7 @@ static struct e1000_mac_operations es2_mac_ops = {
 	.get_link_up_info	= e1000_get_link_up_info_80003es2lan,
 	.led_on			= e1000e_led_on_generic,
 	.led_off		= e1000e_led_off_generic,
-	.mc_addr_list_update	= e1000e_mc_addr_list_update_generic,
+	.update_mc_addr_list	= e1000e_update_mc_addr_list_generic,
 	.reset_hw		= e1000_reset_hw_80003es2lan,
 	.init_hw		= e1000_init_hw_80003es2lan,
 	.setup_link		= e1000e_setup_link,
@@ -1224,7 +1243,7 @@ struct e1000_info e1000_es2_info = {
 				  | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
 				  | FLAG_TIPG_MEDIUM_FOR_80003ESLAN,
 	.pba			= 38,
-	.get_invariants		= e1000_get_invariants_80003es2lan,
+	.get_variants		= e1000_get_variants_80003es2lan,
 	.mac_ops		= &es2_mac_ops,
 	.phy_ops		= &es2_phy_ops,
 	.nvm_ops		= &es2_nvm_ops,

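Most of the functional churn in es2lan.c replaces the fixed TXDCTL/TXDCTL1 and TARC0/TARC1 register names with the per-queue TXDCTL(n) and TARC(n) macros defined further down in hw.h. A small standalone sketch (not from the patch) of that addressing scheme follows, assuming only the base offsets copied from hw.h; the per-queue registers sit 0x100 apart, so the macro computes base + (n << 8).

/*
 * Sketch only: check that the parameterized register macros land on the
 * same offsets as the old fixed names (TXDCTL1 = 0x03928, TARC1 = 0x03940).
 * Base offsets copied from the hw.h hunk below.
 */
#include <assert.h>
#include <stdio.h>

#define E1000_TXDCTL_BASE 0x03828
#define E1000_TXDCTL(_n)  (E1000_TXDCTL_BASE + ((_n) << 8))
#define E1000_TARC_BASE   0x03840
#define E1000_TARC(_n)    (E1000_TARC_BASE + ((_n) << 8))

int main(void)
{
	/* Queue 0 keeps the old E1000_TXDCTL/TARC0 offsets ... */
	assert(E1000_TXDCTL(0) == 0x03828 && E1000_TARC(0) == 0x03840);
	/* ... and queue 1 lands on the old E1000_TXDCTL1/TARC1 offsets. */
	assert(E1000_TXDCTL(1) == 0x03928 && E1000_TARC(1) == 0x03940);

	printf("TXDCTL(1)=0x%05x TARC(1)=0x%05x\n",
	       E1000_TXDCTL(1), E1000_TARC(1));
	return 0;
}
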
+ 157 - 125
drivers/net/e1000e/ethtool.c

@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -102,7 +102,7 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
 	"Interrupt test (offline)", "Loopback test  (offline)",
 	"Link test   (on/offline)"
 };
-#define E1000_TEST_LEN	ARRAY_SIZE(e1000_gstrings_test)
+#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
 
 static int e1000_get_settings(struct net_device *netdev,
 			      struct ethtool_cmd *ecmd)
@@ -111,7 +111,7 @@ static int e1000_get_settings(struct net_device *netdev,
 	struct e1000_hw *hw = &adapter->hw;
 	u32 status;
 
-	if (hw->media_type == e1000_media_type_copper) {
+	if (hw->phy.media_type == e1000_media_type_copper) {
 
 		ecmd->supported = (SUPPORTED_10baseT_Half |
 				   SUPPORTED_10baseT_Full |
@@ -165,7 +165,7 @@ static int e1000_get_settings(struct net_device *netdev,
 		ecmd->duplex = -1;
 	}
 
-	ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
+	ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
 			 hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
 	return 0;
 }
@@ -187,7 +187,7 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
 	mac->autoneg = 0;
 
 	/* Fiber NICs only allow 1000 gbps Full duplex */
-	if ((adapter->hw.media_type == e1000_media_type_fiber) &&
+	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
 		spddplx != (SPEED_1000 + DUPLEX_FULL)) {
 		ndev_err(adapter->netdev, "Unsupported Speed/Duplex "
 			 "configuration\n");
@@ -226,8 +226,10 @@ static int e1000_set_settings(struct net_device *netdev,
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 
-	/* When SoL/IDER sessions are active, autoneg/speed/duplex
-	 * cannot be changed */
+	/*
+	 * When SoL/IDER sessions are active, autoneg/speed/duplex
+	 * cannot be changed
+	 */
 	if (e1000_check_reset_block(hw)) {
 		ndev_err(netdev, "Cannot change link "
 			 "characteristics when SoL/IDER is active.\n");
@@ -239,7 +241,7 @@ static int e1000_set_settings(struct net_device *netdev,
 
 	if (ecmd->autoneg == AUTONEG_ENABLE) {
 		hw->mac.autoneg = 1;
-		if (hw->media_type == e1000_media_type_fiber)
+		if (hw->phy.media_type == e1000_media_type_fiber)
 			hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
 						     ADVERTISED_FIBRE |
 						     ADVERTISED_Autoneg;
@@ -248,6 +250,8 @@ static int e1000_set_settings(struct net_device *netdev,
 						     ADVERTISED_TP |
 						     ADVERTISED_Autoneg;
 		ecmd->advertising = hw->phy.autoneg_advertised;
+		if (adapter->fc_autoneg)
+			hw->fc.original_type = e1000_fc_default;
 	} else {
 		if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
 			clear_bit(__E1000_RESETTING, &adapter->state);
@@ -277,11 +281,11 @@ static void e1000_get_pauseparam(struct net_device *netdev,
 	pause->autoneg =
 		(adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
 
-	if (hw->mac.fc == e1000_fc_rx_pause) {
+	if (hw->fc.type == e1000_fc_rx_pause) {
 		pause->rx_pause = 1;
-	} else if (hw->mac.fc == e1000_fc_tx_pause) {
+	} else if (hw->fc.type == e1000_fc_tx_pause) {
 		pause->tx_pause = 1;
-	} else if (hw->mac.fc == e1000_fc_full) {
+	} else if (hw->fc.type == e1000_fc_full) {
 		pause->rx_pause = 1;
 		pause->tx_pause = 1;
 	}
@@ -300,18 +304,18 @@ static int e1000_set_pauseparam(struct net_device *netdev,
 		msleep(1);
 
 	if (pause->rx_pause && pause->tx_pause)
-		hw->mac.fc = e1000_fc_full;
+		hw->fc.type = e1000_fc_full;
 	else if (pause->rx_pause && !pause->tx_pause)
-		hw->mac.fc = e1000_fc_rx_pause;
+		hw->fc.type = e1000_fc_rx_pause;
 	else if (!pause->rx_pause && pause->tx_pause)
-		hw->mac.fc = e1000_fc_tx_pause;
+		hw->fc.type = e1000_fc_tx_pause;
 	else if (!pause->rx_pause && !pause->tx_pause)
-		hw->mac.fc = e1000_fc_none;
+		hw->fc.type = e1000_fc_none;
 
-	hw->mac.original_fc = hw->mac.fc;
+	hw->fc.original_type = hw->fc.type;
 
 	if (adapter->fc_autoneg == AUTONEG_ENABLE) {
-		hw->mac.fc = e1000_fc_default;
+		hw->fc.type = e1000_fc_default;
 		if (netif_running(adapter->netdev)) {
 			e1000e_down(adapter);
 			e1000e_up(adapter);
@@ -319,7 +323,7 @@ static int e1000_set_pauseparam(struct net_device *netdev,
 			e1000e_reset(adapter);
 		}
 	} else {
-		retval = ((hw->media_type == e1000_media_type_fiber) ?
+		retval = ((hw->phy.media_type == e1000_media_type_fiber) ?
 			  hw->mac.ops.setup_link(hw) : e1000e_force_mac_fc(hw));
 	}
 
@@ -558,8 +562,10 @@ static int e1000_set_eeprom(struct net_device *netdev,
 	ret_val = e1000_write_nvm(hw, first_word,
 				  last_word - first_word + 1, eeprom_buff);
 
-	/* Update the checksum over the first part of the EEPROM if needed
-	 * and flush shadow RAM for 82573 controllers */
+	/*
+	 * Update the checksum over the first part of the EEPROM if needed
+	 * and flush shadow RAM for 82573 controllers
+	 */
 	if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG) ||
 			       (hw->mac.type == e1000_82573)))
 		e1000e_update_nvm_checksum(hw);
@@ -578,8 +584,10 @@ static void e1000_get_drvinfo(struct net_device *netdev,
 	strncpy(drvinfo->driver,  e1000e_driver_name, 32);
 	strncpy(drvinfo->version, e1000e_driver_version, 32);
 
-	/* EEPROM image version # is reported as firmware version # for
-	 * PCI-E controllers */
+	/*
+	 * EEPROM image version # is reported as firmware version # for
+	 * PCI-E controllers
+	 */
 	e1000_read_nvm(&adapter->hw, 5, 1, &eeprom_data);
 	sprintf(firmware_version, "%d.%d-%d",
 		(eeprom_data & 0xF000) >> 12,
@@ -633,10 +641,17 @@ static int e1000_set_ringparam(struct net_device *netdev,
 	tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
 	if (!tx_ring)
 		goto err_alloc_tx;
+	/*
+	 * use a memcpy to save any previously configured
+	 * items like napi structs from having to be
+	 * reinitialized
+	 */
+	memcpy(tx_ring, tx_old, sizeof(struct e1000_ring));
 
 	rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
 	if (!rx_ring)
 		goto err_alloc_rx;
+	memcpy(rx_ring, rx_old, sizeof(struct e1000_ring));
 
 	adapter->tx_ring = tx_ring;
 	adapter->rx_ring = rx_ring;
@@ -658,8 +673,10 @@ static int e1000_set_ringparam(struct net_device *netdev,
 		if (err)
 			goto err_setup_tx;
 
-		/* save the new, restore the old in order to free it,
-		 * then restore the new back again */
+		/*
+		 * restore the old in order to free it,
+		 * then add in the new
+		 */
 		adapter->rx_ring = rx_old;
 		adapter->tx_ring = tx_old;
 		e1000e_free_rx_resources(adapter);
@@ -690,61 +707,55 @@ err_setup:
 	return err;
 }
 
-static bool reg_pattern_test_array(struct e1000_adapter *adapter, u64 *data,
-				   int reg, int offset, u32 mask, u32 write)
+static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
+			     int reg, int offset, u32 mask, u32 write)
 {
-	int i;
-	u32 read;
+	u32 pat, val;
 	static const u32 test[] =
 		{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
-	for (i = 0; i < ARRAY_SIZE(test); i++) {
+	for (pat = 0; pat < ARRAY_SIZE(test); pat++) {
 		E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset,
-				      (test[i] & write));
-		read = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
-		if (read != (test[i] & write & mask)) {
+				      (test[pat] & write));
+		val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
+		if (val != (test[pat] & write & mask)) {
 			ndev_err(adapter->netdev, "pattern test reg %04X "
 				 "failed: got 0x%08X expected 0x%08X\n",
 				 reg + offset,
-				 read, (test[i] & write & mask));
+				 val, (test[pat] & write & mask));
 			*data = reg;
-			return true;
+			return 1;
 		}
 	}
-	return false;
+	return 0;
 }
 
 static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
 			      int reg, u32 mask, u32 write)
 {
-	u32 read;
+	u32 val;
 	__ew32(&adapter->hw, reg, write & mask);
-	read = __er32(&adapter->hw, reg);
-	if ((write & mask) != (read & mask)) {
+	val = __er32(&adapter->hw, reg);
+	if ((write & mask) != (val & mask)) {
 		ndev_err(adapter->netdev, "set/check reg %04X test failed: "
-			 "got 0x%08X expected 0x%08X\n", reg, (read & mask),
+			 "got 0x%08X expected 0x%08X\n", reg, (val & mask),
 			 (write & mask));
 		*data = reg;
-		return true;
+		return 1;
 	}
-	return false;
+	return 0;
 }
-
-#define REG_PATTERN_TEST(R, M, W) \
-	do { \
-		if (reg_pattern_test_array(adapter, data, R, 0, M, W)) \
-			return 1; \
+#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write)                       \
+	do {                                                                   \
+		if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \
+			return 1;                                              \
 	} while (0)
+#define REG_PATTERN_TEST(reg, mask, write)                                     \
+	REG_PATTERN_TEST_ARRAY(reg, 0, mask, write)
 
-#define REG_PATTERN_TEST_ARRAY(R, offset, M, W) \
-	do { \
-		if (reg_pattern_test_array(adapter, data, R, offset, M, W)) \
-			return 1; \
-	} while (0)
-
-#define REG_SET_AND_CHECK(R, M, W) \
-	do { \
-		if (reg_set_and_check(adapter, data, R, M, W)) \
-			return 1; \
+#define REG_SET_AND_CHECK(reg, mask, write)                                    \
+	do {                                                                   \
+		if (reg_set_and_check(adapter, data, reg, mask, write))        \
+			return 1;                                              \
 	} while (0)
 
 static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
@@ -758,7 +769,8 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
 	u32 i;
 	u32 toggle;
 
-	/* The status register is Read Only, so a write should fail.
+	/*
+	 * The status register is Read Only, so a write should fail.
 	 * Some bits that get toggled are ignored.
 	 */
 	switch (mac->type) {
@@ -908,7 +920,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
 		mask = 1 << i;
 
 		if (!shared_int) {
-			/* Disable the interrupt to be reported in
+			/*
+			 * Disable the interrupt to be reported in
 			 * the cause register and then force the same
 			 * interrupt and see if one gets posted.  If
 			 * an interrupt was posted to the bus, the
@@ -925,7 +938,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
 			}
 		}
 
-		/* Enable the interrupt to be reported in
+		/*
+		 * Enable the interrupt to be reported in
 		 * the cause register and then force the same
 		 * interrupt and see if one gets posted.  If
 		 * an interrupt was not posted to the bus, the
@@ -942,7 +956,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
 		}
 
 		if (!shared_int) {
-			/* Disable the other interrupts to be reported in
+			/*
+			 * Disable the other interrupts to be reported in
 			 * the cause register and then force the other
 			 * interrupts and see if any get posted.  If
 			 * an interrupt was posted to the bus, the
@@ -1024,7 +1039,6 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 	struct pci_dev *pdev = adapter->pdev;
 	struct e1000_hw *hw = &adapter->hw;
 	u32 rctl;
-	int size;
 	int i;
 	int ret_val;
 
@@ -1033,13 +1047,13 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 	if (!tx_ring->count)
 		tx_ring->count = E1000_DEFAULT_TXD;
 
-	size = tx_ring->count * sizeof(struct e1000_buffer);
-	tx_ring->buffer_info = kmalloc(size, GFP_KERNEL);
-	if (!tx_ring->buffer_info) {
+	tx_ring->buffer_info = kcalloc(tx_ring->count,
+				       sizeof(struct e1000_buffer),
+				       GFP_KERNEL);
+	if (!(tx_ring->buffer_info)) {
 		ret_val = 1;
 		goto err_nomem;
 	}
-	memset(tx_ring->buffer_info, 0, size);
 
 	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
 	tx_ring->size = ALIGN(tx_ring->size, 4096);
@@ -1049,21 +1063,17 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 		ret_val = 2;
 		goto err_nomem;
 	}
-	memset(tx_ring->desc, 0, tx_ring->size);
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
 
-	ew32(TDBAL,
-			((u64) tx_ring->dma & 0x00000000FFFFFFFF));
+	ew32(TDBAL, ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
 	ew32(TDBAH, ((u64) tx_ring->dma >> 32));
-	ew32(TDLEN,
-			tx_ring->count * sizeof(struct e1000_tx_desc));
+	ew32(TDLEN, tx_ring->count * sizeof(struct e1000_tx_desc));
 	ew32(TDH, 0);
 	ew32(TDT, 0);
-	ew32(TCTL,
-			E1000_TCTL_PSP | E1000_TCTL_EN |
-			E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
-			E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
+	ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR |
+	     E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
+	     E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
 
 	for (i = 0; i < tx_ring->count; i++) {
 		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
@@ -1085,12 +1095,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 			ret_val = 4;
 			goto err_nomem;
 		}
-		tx_desc->buffer_addr = cpu_to_le64(
-					 tx_ring->buffer_info[i].dma);
+		tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma);
 		tx_desc->lower.data = cpu_to_le32(skb->len);
 		tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
 						   E1000_TXD_CMD_IFCS |
-						   E1000_TXD_CMD_RPS);
+						   E1000_TXD_CMD_RS);
 		tx_desc->upper.data = 0;
 	}
 
@@ -1099,13 +1108,13 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 	if (!rx_ring->count)
 		rx_ring->count = E1000_DEFAULT_RXD;
 
-	size = rx_ring->count * sizeof(struct e1000_buffer);
-	rx_ring->buffer_info = kmalloc(size, GFP_KERNEL);
-	if (!rx_ring->buffer_info) {
+	rx_ring->buffer_info = kcalloc(rx_ring->count,
+				       sizeof(struct e1000_buffer),
+				       GFP_KERNEL);
+	if (!(rx_ring->buffer_info)) {
 		ret_val = 5;
 		goto err_nomem;
 	}
-	memset(rx_ring->buffer_info, 0, size);
 
 	rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc);
 	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
@@ -1114,7 +1123,6 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 		ret_val = 6;
 		goto err_nomem;
 	}
-	memset(rx_ring->desc, 0, rx_ring->size);
 	rx_ring->next_to_use = 0;
 	rx_ring->next_to_clean = 0;
 
@@ -1126,6 +1134,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 	ew32(RDH, 0);
 	ew32(RDT, 0);
 	rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
+		E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
+		E1000_RCTL_SBP | E1000_RCTL_SECRC |
 		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
 		(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
 	ew32(RCTL, rctl);
@@ -1175,21 +1185,22 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 	u32 ctrl_reg = 0;
 	u32 stat_reg = 0;
 
-	adapter->hw.mac.autoneg = 0;
+	hw->mac.autoneg = 0;
 
-	if (adapter->hw.phy.type == e1000_phy_m88) {
+	if (hw->phy.type == e1000_phy_m88) {
 		/* Auto-MDI/MDIX Off */
 		e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
 		/* reset to update Auto-MDI/MDIX */
 		e1e_wphy(hw, PHY_CONTROL, 0x9140);
 		/* autoneg off */
 		e1e_wphy(hw, PHY_CONTROL, 0x8140);
-	} else if (adapter->hw.phy.type == e1000_phy_gg82563)
+	} else if (hw->phy.type == e1000_phy_gg82563)
 		e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC);
 
 	ctrl_reg = er32(CTRL);
 
-	if (adapter->hw.phy.type == e1000_phy_ife) {
+	switch (hw->phy.type) {
+	case e1000_phy_ife:
 		/* force 100, set loopback */
 		e1e_wphy(hw, PHY_CONTROL, 0x6100);
 
@@ -1199,9 +1210,11 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 			     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
 			     E1000_CTRL_SPD_100 |/* Force Speed to 100 */
 			     E1000_CTRL_FD);	 /* Force Duplex to FULL */
-	} else {
+		break;
+	default:
 		/* force 1000, set loopback */
 		e1e_wphy(hw, PHY_CONTROL, 0x4140);
+		mdelay(250);
 
 		/* Now set up the MAC to the same speed/duplex as the PHY. */
 		ctrl_reg = er32(CTRL);
@@ -1210,14 +1223,20 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 			     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
 			     E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
 			     E1000_CTRL_FD);	 /* Force Duplex to FULL */
+
+		if ((adapter->hw.mac.type == e1000_ich8lan) ||
+		    (adapter->hw.mac.type == e1000_ich9lan))
+			ctrl_reg |= E1000_CTRL_SLU;	/* Set Link Up */
 	}
 
-	if (adapter->hw.media_type == e1000_media_type_copper &&
-	   adapter->hw.phy.type == e1000_phy_m88) {
+	if (hw->phy.media_type == e1000_media_type_copper &&
+	    hw->phy.type == e1000_phy_m88) {
 		ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
 	} else {
-		/* Set the ILOS bit on the fiber Nic if half duplex link is
-		 * detected. */
+		/*
+		 * Set the ILOS bit on the fiber Nic if half duplex link is
+		 * detected.
+		 */
 		stat_reg = er32(STATUS);
 		if ((stat_reg & E1000_STATUS_FD) == 0)
 			ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
@@ -1225,10 +1244,11 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 
 	ew32(CTRL, ctrl_reg);
 
-	/* Disable the receiver on the PHY so when a cable is plugged in, the
+	/*
+	 * Disable the receiver on the PHY so when a cable is plugged in, the
 	 * PHY does not begin to autoneg when a cable is reconnected to the NIC.
 	 */
-	if (adapter->hw.phy.type == e1000_phy_m88)
+	if (hw->phy.type == e1000_phy_m88)
 		e1000_phy_disable_receiver(adapter);
 
 	udelay(500);
@@ -1244,8 +1264,10 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
 
 	/* special requirements for 82571/82572 fiber adapters */
 
-	/* jump through hoops to make sure link is up because serdes
-	 * link is hardwired up */
+	/*
+	 * jump through hoops to make sure link is up because serdes
+	 * link is hardwired up
+	 */
 	ctrl |= E1000_CTRL_SLU;
 	ew32(CTRL, ctrl);
 
@@ -1263,8 +1285,10 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
 		ew32(CTRL, ctrl);
 	}
 
-	/* special write to serdes control register to enable SerDes analog
-	 * loopback */
+	/*
+	 * special write to serdes control register to enable SerDes analog
+	 * loopback
+	 */
 #define E1000_SERDES_LB_ON 0x410
 	ew32(SCTL, E1000_SERDES_LB_ON);
 	msleep(10);
@@ -1279,8 +1303,10 @@ static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter)
 	u32 ctrlext = er32(CTRL_EXT);
 	u32 ctrl = er32(CTRL);
 
-	/* save CTRL_EXT to restore later, reuse an empty variable (unused
-	   on mac_type 80003es2lan) */
+	/*
+	 * save CTRL_EXT to restore later, reuse an empty variable (unused
+	 * on mac_type 80003es2lan)
+	 */
 	adapter->tx_fifo_head = ctrlext;
 
 	/* clear the serdes mode bits, putting the device into mac loopback */
@@ -1302,7 +1328,7 @@ static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter)
 #define KMRNCTRLSTA_OPMODE (0x1F << 16)
 #define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582
 	ew32(KMRNCTRLSTA,
-		(KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII));
+	     (KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII));
 
 	return 0;
 }
@@ -1312,8 +1338,8 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 rctl;
 
-	if (hw->media_type == e1000_media_type_fiber ||
-	    hw->media_type == e1000_media_type_internal_serdes) {
+	if (hw->phy.media_type == e1000_media_type_fiber ||
+	    hw->phy.media_type == e1000_media_type_internal_serdes) {
 		switch (hw->mac.type) {
 		case e1000_80003es2lan:
 			return e1000_set_es2lan_mac_loopback(adapter);
@@ -1328,7 +1354,7 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
 			ew32(RCTL, rctl);
 			return 0;
 		}
-	} else if (hw->media_type == e1000_media_type_copper) {
+	} else if (hw->phy.media_type == e1000_media_type_copper) {
 		return e1000_integrated_phy_loopback(adapter);
 	}
 
@@ -1347,18 +1373,17 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
 
 	switch (hw->mac.type) {
 	case e1000_80003es2lan:
-		if (hw->media_type == e1000_media_type_fiber ||
-		    hw->media_type == e1000_media_type_internal_serdes) {
+		if (hw->phy.media_type == e1000_media_type_fiber ||
+		    hw->phy.media_type == e1000_media_type_internal_serdes) {
 			/* restore CTRL_EXT, stealing space from tx_fifo_head */
-			ew32(CTRL_EXT,
-					adapter->tx_fifo_head);
+			ew32(CTRL_EXT, adapter->tx_fifo_head);
 			adapter->tx_fifo_head = 0;
 		}
 		/* fall through */
 	case e1000_82571:
 	case e1000_82572:
-		if (hw->media_type == e1000_media_type_fiber ||
-		    hw->media_type == e1000_media_type_internal_serdes) {
+		if (hw->phy.media_type == e1000_media_type_fiber ||
+		    hw->phy.media_type == e1000_media_type_internal_serdes) {
 #define E1000_SERDES_LB_OFF 0x400
 			ew32(SCTL, E1000_SERDES_LB_OFF);
 			msleep(10);
@@ -1414,7 +1439,8 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
 
 	ew32(RDT, rx_ring->count - 1);
 
-	/* Calculate the loop count based on the largest descriptor ring
+	/*
+	 * Calculate the loop count based on the largest descriptor ring
 	 * The idea is to wrap the largest ring a number of times using 64
 	 * send/receive pairs during each loop
 	 */
@@ -1428,8 +1454,8 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
 	l = 0;
 	for (j = 0; j <= lc; j++) { /* loop count loop */
 		for (i = 0; i < 64; i++) { /* send the packets */
-			e1000_create_lbtest_frame(
-				tx_ring->buffer_info[i].skb, 1024);
+			e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb,
+						  1024);
 			pci_dma_sync_single_for_device(pdev,
 					tx_ring->buffer_info[k].dma,
 					tx_ring->buffer_info[k].length,
@@ -1454,7 +1480,8 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
 			l++;
 			if (l == rx_ring->count)
 				l = 0;
-			/* time + 20 msecs (200 msecs on 2.4) is more than
+			/*
+			 * time + 20 msecs (200 msecs on 2.4) is more than
 			 * enough time to complete the receives, if it's
 			 * exceeded, break and error off
 			 */
@@ -1463,7 +1490,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
 			ret_val = 13; /* ret_val is the same as mis-compare */
 			break;
 		}
-		if (jiffies >= (time + 2)) {
+		if (jiffies >= (time + 20)) {
 			ret_val = 14; /* error code for time out error */
 			break;
 		}
@@ -1473,8 +1500,10 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
 
 static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
 {
-	/* PHY loopback cannot be performed if SoL/IDER
-	 * sessions are active */
+	/*
+	 * PHY loopback cannot be performed if SoL/IDER
+	 * sessions are active
+	 */
 	if (e1000_check_reset_block(&adapter->hw)) {
 		ndev_err(adapter->netdev, "Cannot do PHY loopback test "
 			 "when SoL/IDER is active.\n");
@@ -1504,12 +1533,14 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
 	struct e1000_hw *hw = &adapter->hw;
 
 	*data = 0;
-	if (hw->media_type == e1000_media_type_internal_serdes) {
+	if (hw->phy.media_type == e1000_media_type_internal_serdes) {
 		int i = 0;
 		hw->mac.serdes_has_link = 0;
 
-		/* On some blade server designs, link establishment
-		 * could take as long as 2-3 minutes */
+		/*
+		 * On some blade server designs, link establishment
+		 * could take as long as 2-3 minutes
+		 */
 		do {
 			hw->mac.ops.check_for_link(hw);
 			if (hw->mac.serdes_has_link)
@@ -1562,8 +1593,10 @@ static void e1000_diag_test(struct net_device *netdev,
 
 		ndev_info(netdev, "offline testing starting\n");
 
-		/* Link test performed before hardware reset so autoneg doesn't
-		 * interfere with test result */
+		/*
+		 * Link test performed before hardware reset so autoneg doesn't
+		 * interfere with test result
+		 */
 		if (e1000_link_test(adapter, &data[4]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
@@ -1596,9 +1629,9 @@ static void e1000_diag_test(struct net_device *netdev,
 		adapter->hw.mac.autoneg = autoneg;
 
 		/* force this routine to wait until autoneg complete/timeout */
-		adapter->hw.phy.wait_for_link = 1;
+		adapter->hw.phy.autoneg_wait_to_complete = 1;
 		e1000e_reset(adapter);
-		adapter->hw.phy.wait_for_link = 0;
+		adapter->hw.phy.autoneg_wait_to_complete = 0;
 
 		clear_bit(__E1000_TESTING, &adapter->state);
 		if (if_running)
@@ -1768,8 +1801,7 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
 
 	switch (stringset) {
 	case ETH_SS_TEST:
-		memcpy(data, *e1000_gstrings_test,
-			sizeof(e1000_gstrings_test));
+		memcpy(data, *e1000_gstrings_test, sizeof(e1000_gstrings_test));
 		break;
 	case ETH_SS_STATS:
 		for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {

+ 89 - 84
drivers/net/e1000e/hw.h

@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -66,14 +66,14 @@ enum e1e_registers {
 	E1000_IMS      = 0x000D0, /* Interrupt Mask Set - RW */
 	E1000_IMC      = 0x000D8, /* Interrupt Mask Clear - WO */
 	E1000_IAM      = 0x000E0, /* Interrupt Acknowledge Auto Mask */
-	E1000_RCTL     = 0x00100, /* RX Control - RW */
+	E1000_RCTL     = 0x00100, /* Rx Control - RW */
 	E1000_FCTTV    = 0x00170, /* Flow Control Transmit Timer Value - RW */
-	E1000_TXCW     = 0x00178, /* TX Configuration Word - RW */
-	E1000_RXCW     = 0x00180, /* RX Configuration Word - RO */
-	E1000_TCTL     = 0x00400, /* TX Control - RW */
-	E1000_TCTL_EXT = 0x00404, /* Extended TX Control - RW */
-	E1000_TIPG     = 0x00410, /* TX Inter-packet gap -RW */
-	E1000_AIT      = 0x00458, /* Adaptive Interframe Spacing Throttle - RW */
+	E1000_TXCW     = 0x00178, /* Tx Configuration Word - RW */
+	E1000_RXCW     = 0x00180, /* Rx Configuration Word - RO */
+	E1000_TCTL     = 0x00400, /* Tx Control - RW */
+	E1000_TCTL_EXT = 0x00404, /* Extended Tx Control - RW */
+	E1000_TIPG     = 0x00410, /* Tx Inter-packet gap -RW */
+	E1000_AIT      = 0x00458, /* Adaptive Interframe Spacing Throttle -RW */
 	E1000_LEDCTL   = 0x00E00, /* LED Control - RW */
 	E1000_EXTCNF_CTRL  = 0x00F00, /* Extended Configuration Control */
 	E1000_EXTCNF_SIZE  = 0x00F08, /* Extended Configuration Size */
@@ -87,12 +87,14 @@ enum e1e_registers {
 	E1000_FCRTL    = 0x02160, /* Flow Control Receive Threshold Low - RW */
 	E1000_FCRTH    = 0x02168, /* Flow Control Receive Threshold High - RW */
 	E1000_PSRCTL   = 0x02170, /* Packet Split Receive Control - RW */
-	E1000_RDBAL    = 0x02800, /* RX Descriptor Base Address Low - RW */
-	E1000_RDBAH    = 0x02804, /* RX Descriptor Base Address High - RW */
-	E1000_RDLEN    = 0x02808, /* RX Descriptor Length - RW */
-	E1000_RDH      = 0x02810, /* RX Descriptor Head - RW */
-	E1000_RDT      = 0x02818, /* RX Descriptor Tail - RW */
-	E1000_RDTR     = 0x02820, /* RX Delay Timer - RW */
+	E1000_RDBAL    = 0x02800, /* Rx Descriptor Base Address Low - RW */
+	E1000_RDBAH    = 0x02804, /* Rx Descriptor Base Address High - RW */
+	E1000_RDLEN    = 0x02808, /* Rx Descriptor Length - RW */
+	E1000_RDH      = 0x02810, /* Rx Descriptor Head - RW */
+	E1000_RDT      = 0x02818, /* Rx Descriptor Tail - RW */
+	E1000_RDTR     = 0x02820, /* Rx Delay Timer - RW */
+	E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
+#define E1000_RXDCTL(_n)   (E1000_RXDCTL_BASE + (_n << 8))
 	E1000_RADV     = 0x0282C, /* RX Interrupt Absolute Delay Timer - RW */
 
 /* Convenience macros
@@ -105,17 +107,17 @@ enum e1e_registers {
  */
 #define E1000_RDBAL_REG(_n)   (E1000_RDBAL + (_n << 8))
 	E1000_KABGTXD  = 0x03004, /* AFE Band Gap Transmit Ref Data */
-	E1000_TDBAL    = 0x03800, /* TX Descriptor Base Address Low - RW */
-	E1000_TDBAH    = 0x03804, /* TX Descriptor Base Address High - RW */
-	E1000_TDLEN    = 0x03808, /* TX Descriptor Length - RW */
-	E1000_TDH      = 0x03810, /* TX Descriptor Head - RW */
-	E1000_TDT      = 0x03818, /* TX Descriptor Tail - RW */
-	E1000_TIDV     = 0x03820, /* TX Interrupt Delay Value - RW */
-	E1000_TXDCTL   = 0x03828, /* TX Descriptor Control - RW */
-	E1000_TADV     = 0x0382C, /* TX Interrupt Absolute Delay Val - RW */
-	E1000_TARC0    = 0x03840, /* TX Arbitration Count (0) */
-	E1000_TXDCTL1  = 0x03928, /* TX Descriptor Control (1) - RW */
-	E1000_TARC1    = 0x03940, /* TX Arbitration Count (1) */
+	E1000_TDBAL    = 0x03800, /* Tx Descriptor Base Address Low - RW */
+	E1000_TDBAH    = 0x03804, /* Tx Descriptor Base Address High - RW */
+	E1000_TDLEN    = 0x03808, /* Tx Descriptor Length - RW */
+	E1000_TDH      = 0x03810, /* Tx Descriptor Head - RW */
+	E1000_TDT      = 0x03818, /* Tx Descriptor Tail - RW */
+	E1000_TIDV     = 0x03820, /* Tx Interrupt Delay Value - RW */
+	E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */
+#define E1000_TXDCTL(_n)   (E1000_TXDCTL_BASE + (_n << 8))
+	E1000_TADV     = 0x0382C, /* Tx Interrupt Absolute Delay Val - RW */
+	E1000_TARC_BASE = 0x03840, /* Tx Arbitration Count (0) */
+#define E1000_TARC(_n)   (E1000_TARC_BASE + (_n << 8))
 	E1000_CRCERRS  = 0x04000, /* CRC Error Count - R/clr */
 	E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */
 	E1000_SYMERRS  = 0x04008, /* Symbol Error Count - R/clr */
@@ -127,53 +129,53 @@ enum e1e_registers {
 	E1000_LATECOL  = 0x04020, /* Late Collision Count - R/clr */
 	E1000_COLC     = 0x04028, /* Collision Count - R/clr */
 	E1000_DC       = 0x04030, /* Defer Count - R/clr */
-	E1000_TNCRS    = 0x04034, /* TX-No CRS - R/clr */
+	E1000_TNCRS    = 0x04034, /* Tx-No CRS - R/clr */
 	E1000_SEC      = 0x04038, /* Sequence Error Count - R/clr */
 	E1000_CEXTERR  = 0x0403C, /* Carrier Extension Error Count - R/clr */
 	E1000_RLEC     = 0x04040, /* Receive Length Error Count - R/clr */
-	E1000_XONRXC   = 0x04048, /* XON RX Count - R/clr */
-	E1000_XONTXC   = 0x0404C, /* XON TX Count - R/clr */
-	E1000_XOFFRXC  = 0x04050, /* XOFF RX Count - R/clr */
-	E1000_XOFFTXC  = 0x04054, /* XOFF TX Count - R/clr */
-	E1000_FCRUC    = 0x04058, /* Flow Control RX Unsupported Count- R/clr */
-	E1000_PRC64    = 0x0405C, /* Packets RX (64 bytes) - R/clr */
-	E1000_PRC127   = 0x04060, /* Packets RX (65-127 bytes) - R/clr */
-	E1000_PRC255   = 0x04064, /* Packets RX (128-255 bytes) - R/clr */
-	E1000_PRC511   = 0x04068, /* Packets RX (255-511 bytes) - R/clr */
-	E1000_PRC1023  = 0x0406C, /* Packets RX (512-1023 bytes) - R/clr */
-	E1000_PRC1522  = 0x04070, /* Packets RX (1024-1522 bytes) - R/clr */
-	E1000_GPRC     = 0x04074, /* Good Packets RX Count - R/clr */
-	E1000_BPRC     = 0x04078, /* Broadcast Packets RX Count - R/clr */
-	E1000_MPRC     = 0x0407C, /* Multicast Packets RX Count - R/clr */
-	E1000_GPTC     = 0x04080, /* Good Packets TX Count - R/clr */
-	E1000_GORCL    = 0x04088, /* Good Octets RX Count Low - R/clr */
-	E1000_GORCH    = 0x0408C, /* Good Octets RX Count High - R/clr */
-	E1000_GOTCL    = 0x04090, /* Good Octets TX Count Low - R/clr */
-	E1000_GOTCH    = 0x04094, /* Good Octets TX Count High - R/clr */
-	E1000_RNBC     = 0x040A0, /* RX No Buffers Count - R/clr */
-	E1000_RUC      = 0x040A4, /* RX Undersize Count - R/clr */
-	E1000_RFC      = 0x040A8, /* RX Fragment Count - R/clr */
-	E1000_ROC      = 0x040AC, /* RX Oversize Count - R/clr */
-	E1000_RJC      = 0x040B0, /* RX Jabber Count - R/clr */
-	E1000_MGTPRC   = 0x040B4, /* Management Packets RX Count - R/clr */
+	E1000_XONRXC   = 0x04048, /* XON Rx Count - R/clr */
+	E1000_XONTXC   = 0x0404C, /* XON Tx Count - R/clr */
+	E1000_XOFFRXC  = 0x04050, /* XOFF Rx Count - R/clr */
+	E1000_XOFFTXC  = 0x04054, /* XOFF Tx Count - R/clr */
+	E1000_FCRUC    = 0x04058, /* Flow Control Rx Unsupported Count- R/clr */
+	E1000_PRC64    = 0x0405C, /* Packets Rx (64 bytes) - R/clr */
+	E1000_PRC127   = 0x04060, /* Packets Rx (65-127 bytes) - R/clr */
+	E1000_PRC255   = 0x04064, /* Packets Rx (128-255 bytes) - R/clr */
+	E1000_PRC511   = 0x04068, /* Packets Rx (255-511 bytes) - R/clr */
+	E1000_PRC1023  = 0x0406C, /* Packets Rx (512-1023 bytes) - R/clr */
+	E1000_PRC1522  = 0x04070, /* Packets Rx (1024-1522 bytes) - R/clr */
+	E1000_GPRC     = 0x04074, /* Good Packets Rx Count - R/clr */
+	E1000_BPRC     = 0x04078, /* Broadcast Packets Rx Count - R/clr */
+	E1000_MPRC     = 0x0407C, /* Multicast Packets Rx Count - R/clr */
+	E1000_GPTC     = 0x04080, /* Good Packets Tx Count - R/clr */
+	E1000_GORCL    = 0x04088, /* Good Octets Rx Count Low - R/clr */
+	E1000_GORCH    = 0x0408C, /* Good Octets Rx Count High - R/clr */
+	E1000_GOTCL    = 0x04090, /* Good Octets Tx Count Low - R/clr */
+	E1000_GOTCH    = 0x04094, /* Good Octets Tx Count High - R/clr */
+	E1000_RNBC     = 0x040A0, /* Rx No Buffers Count - R/clr */
+	E1000_RUC      = 0x040A4, /* Rx Undersize Count - R/clr */
+	E1000_RFC      = 0x040A8, /* Rx Fragment Count - R/clr */
+	E1000_ROC      = 0x040AC, /* Rx Oversize Count - R/clr */
+	E1000_RJC      = 0x040B0, /* Rx Jabber Count - R/clr */
+	E1000_MGTPRC   = 0x040B4, /* Management Packets Rx Count - R/clr */
 	E1000_MGTPDC   = 0x040B8, /* Management Packets Dropped Count - R/clr */
-	E1000_MGTPTC   = 0x040BC, /* Management Packets TX Count - R/clr */
-	E1000_TORL     = 0x040C0, /* Total Octets RX Low - R/clr */
-	E1000_TORH     = 0x040C4, /* Total Octets RX High - R/clr */
-	E1000_TOTL     = 0x040C8, /* Total Octets TX Low - R/clr */
-	E1000_TOTH     = 0x040CC, /* Total Octets TX High - R/clr */
-	E1000_TPR      = 0x040D0, /* Total Packets RX - R/clr */
-	E1000_TPT      = 0x040D4, /* Total Packets TX - R/clr */
-	E1000_PTC64    = 0x040D8, /* Packets TX (64 bytes) - R/clr */
-	E1000_PTC127   = 0x040DC, /* Packets TX (65-127 bytes) - R/clr */
-	E1000_PTC255   = 0x040E0, /* Packets TX (128-255 bytes) - R/clr */
-	E1000_PTC511   = 0x040E4, /* Packets TX (256-511 bytes) - R/clr */
-	E1000_PTC1023  = 0x040E8, /* Packets TX (512-1023 bytes) - R/clr */
-	E1000_PTC1522  = 0x040EC, /* Packets TX (1024-1522 Bytes) - R/clr */
-	E1000_MPTC     = 0x040F0, /* Multicast Packets TX Count - R/clr */
-	E1000_BPTC     = 0x040F4, /* Broadcast Packets TX Count - R/clr */
-	E1000_TSCTC    = 0x040F8, /* TCP Segmentation Context TX - R/clr */
-	E1000_TSCTFC   = 0x040FC, /* TCP Segmentation Context TX Fail - R/clr */
+	E1000_MGTPTC   = 0x040BC, /* Management Packets Tx Count - R/clr */
+	E1000_TORL     = 0x040C0, /* Total Octets Rx Low - R/clr */
+	E1000_TORH     = 0x040C4, /* Total Octets Rx High - R/clr */
+	E1000_TOTL     = 0x040C8, /* Total Octets Tx Low - R/clr */
+	E1000_TOTH     = 0x040CC, /* Total Octets Tx High - R/clr */
+	E1000_TPR      = 0x040D0, /* Total Packets Rx - R/clr */
+	E1000_TPT      = 0x040D4, /* Total Packets Tx - R/clr */
+	E1000_PTC64    = 0x040D8, /* Packets Tx (64 bytes) - R/clr */
+	E1000_PTC127   = 0x040DC, /* Packets Tx (65-127 bytes) - R/clr */
+	E1000_PTC255   = 0x040E0, /* Packets Tx (128-255 bytes) - R/clr */
+	E1000_PTC511   = 0x040E4, /* Packets Tx (256-511 bytes) - R/clr */
+	E1000_PTC1023  = 0x040E8, /* Packets Tx (512-1023 bytes) - R/clr */
+	E1000_PTC1522  = 0x040EC, /* Packets Tx (1024-1522 Bytes) - R/clr */
+	E1000_MPTC     = 0x040F0, /* Multicast Packets Tx Count - R/clr */
+	E1000_BPTC     = 0x040F4, /* Broadcast Packets Tx Count - R/clr */
+	E1000_TSCTC    = 0x040F8, /* TCP Segmentation Context Tx - R/clr */
+	E1000_TSCTFC   = 0x040FC, /* TCP Segmentation Context Tx Fail - R/clr */
 	E1000_IAC      = 0x04100, /* Interrupt Assertion Count */
 	E1000_ICRXPTC  = 0x04104, /* Irq Cause Rx Packet Timer Expire Count */
 	E1000_ICRXATC  = 0x04108, /* Irq Cause Rx Abs Timer Expire Count */
@@ -183,7 +185,7 @@ enum e1e_registers {
 	E1000_ICTXQMTC = 0x0411C, /* Irq Cause Tx Queue MinThreshold Count */
 	E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */
 	E1000_ICRXOC   = 0x04124, /* Irq Cause Receiver Overrun Count */
-	E1000_RXCSUM   = 0x05000, /* RX Checksum Control - RW */
+	E1000_RXCSUM   = 0x05000, /* Rx Checksum Control - RW */
 	E1000_RFCTL    = 0x05008, /* Receive Filter Control */
 	E1000_MTA      = 0x05200, /* Multicast Table Array - RW Array */
 	E1000_RA       = 0x05400, /* Receive Address - RW Array */
@@ -250,8 +252,8 @@ enum e1e_registers {
 #define E1000_VFTA_ENTRY_BIT_SHIFT_MASK	0x1F
 
 #define E1000_HICR_EN			0x01  /* Enable bit - RO */
-#define E1000_HICR_C			0x02  /* Driver sets this bit when done
-					       * to put command in RAM */
+/* Driver sets this bit when done to put command in RAM */
+#define E1000_HICR_C			0x02
 #define E1000_HICR_FW_RESET_ENABLE	0x40
 #define E1000_HICR_FW_RESET		0x80
 
@@ -400,7 +402,7 @@ enum e1000_rev_polarity{
 	e1000_rev_polarity_undefined = 0xFF
 };
 
-enum e1000_fc_mode {
+enum e1000_fc_type {
 	e1000_fc_none = 0,
 	e1000_fc_rx_pause,
 	e1000_fc_tx_pause,
@@ -685,8 +687,7 @@ struct e1000_mac_operations {
 	s32  (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
 	s32  (*led_on)(struct e1000_hw *);
 	s32  (*led_off)(struct e1000_hw *);
-	void (*mc_addr_list_update)(struct e1000_hw *, u8 *, u32, u32,
-					 u32);
+	void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32, u32, u32);
 	s32  (*reset_hw)(struct e1000_hw *);
 	s32  (*init_hw)(struct e1000_hw *);
 	s32  (*setup_link)(struct e1000_hw *);
@@ -728,16 +729,12 @@ struct e1000_mac_info {
 	u8 perm_addr[6];
 
 	enum e1000_mac_type type;
-	enum e1000_fc_mode  fc;
-	enum e1000_fc_mode  original_fc;
 
 	u32 collision_delta;
 	u32 ledctl_default;
 	u32 ledctl_mode1;
 	u32 ledctl_mode2;
-	u32 max_frame_size;
 	u32 mc_filter_type;
-	u32 min_frame_size;
 	u32 tx_packet_delta;
 	u32 txcw;
 
@@ -748,9 +745,6 @@ struct e1000_mac_info {
 	u16 ifs_step_size;
 	u16 mta_reg_count;
 	u16 rar_entry_count;
-	u16 fc_high_water;
-	u16 fc_low_water;
-	u16 fc_pause_time;
 
 	u8  forced_speed_duplex;
 
@@ -780,6 +774,8 @@ struct e1000_phy_info {
 	u32 reset_delay_us; /* in usec */
 	u32 revision;
 
+	enum e1000_media_type media_type;
+
 	u16 autoneg_advertised;
 	u16 autoneg_mask;
 	u16 cable_length;
@@ -792,7 +788,7 @@ struct e1000_phy_info {
 	bool is_mdix;
 	bool polarity_correction;
 	bool speed_downgraded;
-	bool wait_for_link;
+	bool autoneg_wait_to_complete;
 };
 
 struct e1000_nvm_info {
@@ -817,6 +813,16 @@ struct e1000_bus_info {
 	u16 func;
 };
 
+struct e1000_fc_info {
+	u32 high_water;          /* Flow control high-water mark */
+	u32 low_water;           /* Flow control low-water mark */
+	u16 pause_time;          /* Flow control pause timer */
+	bool send_xon;           /* Flow control send XON */
+	bool strict_ieee;        /* Strict IEEE mode */
+	enum e1000_fc_type type; /* Type of flow control */
+	enum e1000_fc_type original_type;
+};
+
 struct e1000_dev_spec_82571 {
 	bool laa_is_present;
 	bool alt_mac_addr_is_present;
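
With the flow-control state split out into struct e1000_fc_info and hung off struct e1000_hw, call sites move from mac->fc / mac->fc_pause_time to hw->fc.type / hw->fc.pause_time. A hedged sketch of the new access pattern (the helper name is invented for illustration and only reads in the driver's context; the field uses mirror the setup_link changes later in this diff):

/* Illustrative only: the hw->fc.* fields that replace the old
 * e1000_mac_info flow-control members. */
static void fc_defaults_sketch(struct e1000_hw *hw)
{
	if (hw->fc.type == e1000_fc_default)
		hw->fc.type = e1000_fc_full;

	/* remember what was requested before any re-negotiation */
	hw->fc.original_type = hw->fc.type;

	/* program the PAUSE timer from the consolidated struct */
	ew32(FCTTV, hw->fc.pause_time);
}
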
@@ -841,6 +847,7 @@ struct e1000_hw {
 	u8 __iomem *flash_address;
 
 	struct e1000_mac_info  mac;
+	struct e1000_fc_info   fc;
 	struct e1000_phy_info  phy;
 	struct e1000_nvm_info  nvm;
 	struct e1000_bus_info  bus;
@@ -850,8 +857,6 @@ struct e1000_hw {
 		struct e1000_dev_spec_82571	e82571;
 		struct e1000_dev_spec_ich8lan	ich8lan;
 	} dev_spec;
-
-	enum e1000_media_type media_type;
 };
 
 #ifdef DEBUG

drivers/net/e1000e/ich8lan.c  (+181 -128)

@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -243,8 +243,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
 	u32 sector_end_addr;
 	u16 i;
 
-	/* Can't read flash registers if the register set isn't mapped.
-	 */
+	/* Can't read flash registers if the register set isn't mapped. */
 	if (!hw->flash_address) {
 		hw_dbg(hw, "ERROR: Flash registers not mapped\n");
 		return -E1000_ERR_CONFIG;
@@ -254,17 +253,21 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
 
 	gfpreg = er32flash(ICH_FLASH_GFPREG);
 
-	/* sector_X_addr is a "sector"-aligned address (4096 bytes)
+	/*
+	 * sector_X_addr is a "sector"-aligned address (4096 bytes)
 	 * Add 1 to sector_end_addr since this sector is included in
-	 * the overall size. */
+	 * the overall size.
+	 */
 	sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
 	sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
 
 	/* flash_base_addr is byte-aligned */
 	nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
 
-	/* find total size of the NVM, then cut in half since the total
-	 * size represents two separate NVM banks. */
+	/*
+	 * find total size of the NVM, then cut in half since the total
+	 * size represents two separate NVM banks.
+	 */
 	nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
 				<< FLASH_SECTOR_ADDR_SHIFT;
 	nvm->flash_bank_size /= 2;
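
As a worked example of the bank-size math above (the GFPREG value and the 0x1FFF mask are assumptions for illustration; sectors are 4096 bytes, so the address shift is 12):

#include <stdio.h>

int main(void)
{
	/* hypothetical GFPREG: base sector 0, last usable sector 7 */
	unsigned int gfpreg           = 0x00070000;
	unsigned int sector_base_addr = gfpreg & 0x1FFF;
	unsigned int sector_end_addr  = ((gfpreg >> 16) & 0x1FFF) + 1; /* 8 */
	unsigned int flash_base_addr  = sector_base_addr << 12;        /* 0x0 */
	unsigned int flash_bank_size  =
		((sector_end_addr - sector_base_addr) << 12) / 2;      /* 0x4000 */

	/* two banks of 16 KiB each out of a 32 KiB NVM region */
	printf("base=0x%X bank_size=0x%X\n", flash_base_addr, flash_bank_size);
	return 0;
}
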
@@ -295,7 +298,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
 	struct e1000_mac_info *mac = &hw->mac;
 
 	/* Set media type function pointer */
-	hw->media_type = e1000_media_type_copper;
+	hw->phy.media_type = e1000_media_type_copper;
 
 	/* Set mta register count */
 	mac->mta_reg_count = 32;
@@ -313,7 +316,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
 	return 0;
 }
 
-static s32 e1000_get_invariants_ich8lan(struct e1000_adapter *adapter)
+static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	s32 rc;
@@ -450,7 +453,7 @@ static s32 e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw)
 
 	udelay(1);
 
-	if (phy->wait_for_link) {
+	if (phy->autoneg_wait_to_complete) {
 		hw_dbg(hw, "Waiting for forced speed/duplex link on IFE phy.\n");
 
 		ret_val = e1000e_phy_has_link_generic(hw,
@@ -496,7 +499,8 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
 	if (ret_val)
 		return ret_val;
 
-	/* Initialize the PHY from the NVM on ICH platforms.  This
+	/*
+	 * Initialize the PHY from the NVM on ICH platforms.  This
 	 * is needed due to an issue where the NVM configuration is
 	 * not properly autoloaded after power transitions.
 	 * Therefore, after each PHY reset, we will load the
@@ -523,7 +527,8 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
 			udelay(100);
 		} while ((!data) && --loop);
 
-		/* If basic configuration is incomplete before the above loop
+		/*
+		 * If basic configuration is incomplete before the above loop
 		 * count reaches 0, loading the configuration from NVM will
 		 * leave the PHY in a bad state possibly resulting in no link.
 		 */
@@ -536,8 +541,10 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
 		data &= ~E1000_STATUS_LAN_INIT_DONE;
 		ew32(STATUS, data);
 
-		/* Make sure HW does not configure LCD from PHY
-		 * extended configuration before SW configuration */
+		/*
+		 * Make sure HW does not configure LCD from PHY
+		 * extended configuration before SW configuration
+		 */
 		data = er32(EXTCNF_CTRL);
 		if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
 			return 0;
@@ -551,8 +558,7 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
 		cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
 		cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
 
-		/* Configure LCD from extended configuration
-		 * region. */
+		/* Configure LCD from extended configuration region. */
 
 		/* cnf_base_addr is in DWORD */
 		word_addr = (u16)(cnf_base_addr << 1);
@@ -681,8 +687,8 @@ static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw)
 	s32 ret_val;
 	u16 phy_data, offset, mask;
 
-	/* Polarity is determined based on the reversal feature
-	 * being enabled.
+	/*
+	 * Polarity is determined based on the reversal feature being enabled.
 	 */
 	if (phy->polarity_correction) {
 		offset	= IFE_PHY_EXTENDED_STATUS_CONTROL;
@@ -731,8 +737,10 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
 		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
 		ew32(PHY_CTRL, phy_ctrl);
 
-		/* Call gig speed drop workaround on LPLU before accessing
-		 * any PHY registers */
+		/*
+		 * Call gig speed drop workaround on LPLU before accessing
+		 * any PHY registers
+		 */
 		if ((hw->mac.type == e1000_ich8lan) &&
 		    (hw->phy.type == e1000_phy_igp_3))
 			e1000e_gig_downshift_workaround_ich8lan(hw);
@@ -747,30 +755,32 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
 		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
 		ew32(PHY_CTRL, phy_ctrl);
 
-		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		/*
+		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
 		 * during Dx states where the power conservation is most
 		 * important.  During driver activity we should enable
-		 * SmartSpeed, so performance is maintained. */
+		 * SmartSpeed, so performance is maintained.
+		 */
 		if (phy->smart_speed == e1000_smart_speed_on) {
 			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
-						    &data);
+					   &data);
 			if (ret_val)
 				return ret_val;
 
 			data |= IGP01E1000_PSCFR_SMART_SPEED;
 			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
-						     data);
+					   data);
 			if (ret_val)
 				return ret_val;
 		} else if (phy->smart_speed == e1000_smart_speed_off) {
 			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
-						    &data);
+					   &data);
 			if (ret_val)
 				return ret_val;
 
 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
 			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
-						     data);
+					   data);
 			if (ret_val)
 				return ret_val;
 		}
@@ -804,34 +814,32 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
 	if (!active) {
 		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
 		ew32(PHY_CTRL, phy_ctrl);
-		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		/*
+		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
 		 * during Dx states where the power conservation is most
 		 * important.  During driver activity we should enable
-		 * SmartSpeed, so performance is maintained. */
+		 * SmartSpeed, so performance is maintained.
+		 */
 		if (phy->smart_speed == e1000_smart_speed_on) {
-			ret_val = e1e_rphy(hw,
-						    IGP01E1000_PHY_PORT_CONFIG,
-						    &data);
+			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   &data);
 			if (ret_val)
 				return ret_val;
 
 			data |= IGP01E1000_PSCFR_SMART_SPEED;
-			ret_val = e1e_wphy(hw,
-						     IGP01E1000_PHY_PORT_CONFIG,
-						     data);
+			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   data);
 			if (ret_val)
 				return ret_val;
 		} else if (phy->smart_speed == e1000_smart_speed_off) {
-			ret_val = e1e_rphy(hw,
-						    IGP01E1000_PHY_PORT_CONFIG,
-						    &data);
+			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   &data);
 			if (ret_val)
 				return ret_val;
 
 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
-			ret_val = e1e_wphy(hw,
-						     IGP01E1000_PHY_PORT_CONFIG,
-						     data);
+			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   data);
 			if (ret_val)
 				return ret_val;
 		}
@@ -841,23 +849,21 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
 		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
 		ew32(PHY_CTRL, phy_ctrl);
 
-		/* Call gig speed drop workaround on LPLU before accessing
-		 * any PHY registers */
+		/*
+		 * Call gig speed drop workaround on LPLU before accessing
+		 * any PHY registers
+		 */
 		if ((hw->mac.type == e1000_ich8lan) &&
 		    (hw->phy.type == e1000_phy_igp_3))
 			e1000e_gig_downshift_workaround_ich8lan(hw);
 
 		/* When LPLU is enabled, we should disable SmartSpeed */
-		ret_val = e1e_rphy(hw,
-					    IGP01E1000_PHY_PORT_CONFIG,
-					    &data);
+		ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
 		if (ret_val)
 			return ret_val;
 
 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
-		ret_val = e1e_wphy(hw,
-					     IGP01E1000_PHY_PORT_CONFIG,
-					     data);
+		ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
 	}
 
 	return 0;
@@ -944,7 +950,8 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
 
 	ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
 
-	/* Either we should have a hardware SPI cycle in progress
+	/*
+	 * Either we should have a hardware SPI cycle in progress
 	 * bit to check against, in order to start a new cycle or
 	 * FDONE bit should be changed in the hardware so that it
 	 * is 1 after hardware reset, which can then be used as an
@@ -953,15 +960,19 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
 	 */
 
 	if (hsfsts.hsf_status.flcinprog == 0) {
-		/* There is no cycle running at present,
-		 * so we can start a cycle */
-		/* Begin by setting Flash Cycle Done. */
+		/*
+		 * There is no cycle running at present,
+		 * so we can start a cycle.
+		 * Begin by setting Flash Cycle Done.
+		 */
 		hsfsts.hsf_status.flcdone = 1;
 		ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
 		ret_val = 0;
 	} else {
-		/* otherwise poll for sometime so the current
-		 * cycle has a chance to end before giving up. */
+		/*
+		 * otherwise poll for some time so the current
+		 * cycle has a chance to end before giving up.
+		 */
 		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
 			hsfsts.regval = __er16flash(hw, ICH_FLASH_HSFSTS);
 			if (hsfsts.hsf_status.flcinprog == 0) {
@@ -971,8 +982,10 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
 			udelay(1);
 		}
 		if (ret_val == 0) {
-			/* Successful in waiting for previous cycle to timeout,
-			 * now set the Flash Cycle Done. */
+			/*
+			 * Successful in waiting for previous cycle to timeout,
+			 * now set the Flash Cycle Done.
+			 */
 			hsfsts.hsf_status.flcdone = 1;
 			ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
 		} else {
@@ -1077,10 +1090,12 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
 		ret_val = e1000_flash_cycle_ich8lan(hw,
 						ICH_FLASH_READ_COMMAND_TIMEOUT);
 
-		/* Check if FCERR is set to 1, if set to 1, clear it
+		/*
+		 * Check if FCERR is set to 1, if set to 1, clear it
 		 * and try the whole sequence a few more times, else
 		 * read in (shift in) the Flash Data0, the order is
-		 * least significant byte first msb to lsb */
+		 * least significant byte first msb to lsb
+		 */
 		if (ret_val == 0) {
 			flash_data = er32flash(ICH_FLASH_FDATA0);
 			if (size == 1) {
@@ -1090,7 +1105,8 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
 			}
 			break;
 		} else {
-			/* If we've gotten here, then things are probably
+			/*
+			 * If we've gotten here, then things are probably
 			 * completely hosed, but if the error condition is
 			 * detected, it won't hurt to give it another try...
 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
@@ -1168,18 +1184,20 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 
 	ret_val = e1000e_update_nvm_checksum_generic(hw);
 	if (ret_val)
-		return ret_val;;
+		return ret_val;
 
 	if (nvm->type != e1000_nvm_flash_sw)
-		return ret_val;;
+		return ret_val;
 
 	ret_val = e1000_acquire_swflag_ich8lan(hw);
 	if (ret_val)
-		return ret_val;;
+		return ret_val;
 
-	/* We're writing to the opposite bank so if we're on bank 1,
+	/*
+	 * We're writing to the opposite bank so if we're on bank 1,
 	 * write to bank 0 etc.  We also need to erase the segment that
-	 * is going to be written */
+	 * is going to be written
+	 */
 	if (!(er32(EECD) & E1000_EECD_SEC1VAL)) {
 		new_bank_offset = nvm->flash_bank_size;
 		old_bank_offset = 0;
@@ -1191,9 +1209,11 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 	}
 
 	for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
-		/* Determine whether to write the value stored
+		/*
+		 * Determine whether to write the value stored
 		 * in the other NVM bank or a modified value stored
-		 * in the shadow RAM */
+		 * in the shadow RAM
+		 */
 		if (dev_spec->shadow_ram[i].modified) {
 			data = dev_spec->shadow_ram[i].value;
 		} else {
@@ -1202,12 +1222,14 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 						      &data);
 		}
 
-		/* If the word is 0x13, then make sure the signature bits
+		/*
+		 * If the word is 0x13, then make sure the signature bits
 		 * (15:14) are 11b until the commit has completed.
 		 * This will allow us to write 10b which indicates the
 		 * signature is valid.  We want to do this after the write
 		 * has completed so that we don't mark the segment valid
-		 * while the write is still in progress */
+		 * while the write is still in progress
+		 */
 		if (i == E1000_ICH_NVM_SIG_WORD)
 			data |= E1000_ICH_NVM_SIG_MASK;
 
@@ -1230,18 +1252,22 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 			break;
 	}
 
-	/* Don't bother writing the segment valid bits if sector
-	 * programming failed. */
+	/*
+	 * Don't bother writing the segment valid bits if sector
+	 * programming failed.
+	 */
 	if (ret_val) {
 		hw_dbg(hw, "Flash commit failed.\n");
 		e1000_release_swflag_ich8lan(hw);
 		return ret_val;
 	}
 
-	/* Finally validate the new segment by setting bit 15:14
+	/*
+	 * Finally validate the new segment by setting bit 15:14
 	 * to 10b in word 0x13, this can be done without an
 	 * erase as well since these bits are 11 to start with
-	 * and we need to change bit 14 to 0b */
+	 * and we need to change bit 14 to 0b
+	 */
 	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
 	e1000_read_flash_word_ich8lan(hw, act_offset, &data);
 	data &= 0xBFFF;
@@ -1253,10 +1279,12 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 		return ret_val;
 	}
 
-	/* And invalidate the previously valid segment by setting
+	/*
+	 * And invalidate the previously valid segment by setting
 	 * its signature word (0x13) high_byte to 0b. This can be
 	 * done without an erase because flash erase sets all bits
-	 * to 1's. We can write 1's to 0's without an erase */
+	 * to 1's. We can write 1's to 0's without an erase
+	 */
 	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
 	if (ret_val) {
@@ -1272,7 +1300,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 
 	e1000_release_swflag_ich8lan(hw);
 
-	/* Reload the EEPROM, or else modifications will not appear
+	/*
+	 * Reload the EEPROM, or else modifications will not appear
 	 * until after the next adapter reset.
 	 */
 	e1000e_reload_nvm(hw);
@@ -1294,7 +1323,8 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
 	s32 ret_val;
 	u16 data;
 
-	/* Read 0x19 and check bit 6.  If this bit is 0, the checksum
+	/*
+	 * Read 0x19 and check bit 6.  If this bit is 0, the checksum
 	 * needs to be fixed.  This bit is an indication that the NVM
 	 * was prepared by OEM software and did not calculate the
 	 * checksum...a likely scenario.
@@ -1364,14 +1394,17 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
 
 		ew32flash(ICH_FLASH_FDATA0, flash_data);
 
-		/* check if FCERR is set to 1 , if set to 1, clear it
-		 * and try the whole sequence a few more times else done */
+		/*
+		 * check if FCERR is set to 1, if set to 1, clear it
+		 * and try the whole sequence a few more times, else done
+		 */
 		ret_val = e1000_flash_cycle_ich8lan(hw,
 					       ICH_FLASH_WRITE_COMMAND_TIMEOUT);
 		if (!ret_val)
 			break;
 
-		/* If we're here, then things are most likely
+		/*
+		 * If we're here, then things are most likely
 		 * completely hosed, but if the error condition
 		 * is detected, it won't hurt to give it another
 		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
@@ -1462,9 +1495,10 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
 
 	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
 
-	/* Determine HW Sector size: Read BERASE bits of hw flash status
-	 * register */
-	/* 00: The Hw sector is 256 bytes, hence we need to erase 16
+	/*
+	 * Determine HW Sector size: Read BERASE bits of hw flash status
+	 * register
+	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
 	 *     consecutive sectors.  The start index for the nth Hw sector
 	 *     can be calculated as = bank * 4096 + n * 256
 	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
@@ -1511,13 +1545,16 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
 			if (ret_val)
 				return ret_val;
 
-			/* Write a value 11 (block Erase) in Flash
-			 * Cycle field in hw flash control */
+			/*
+			 * Write a value 11 (block Erase) in Flash
+			 * Cycle field in hw flash control
+			 */
 			hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
 			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
 			ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
 
-			/* Write the last 24 bits of an index within the
+			/*
+			 * Write the last 24 bits of an index within the
 			 * block into Flash Linear address field in Flash
 			 * Address.
 			 */
@@ -1529,13 +1566,14 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
 			if (ret_val == 0)
 				break;
 
-			/* Check if FCERR is set to 1.  If 1,
+			/*
+			 * Check if FCERR is set to 1.  If 1,
 			 * clear it and try the whole sequence
-			 * a few more times else Done */
+			 * a few more times else Done
+			 */
 			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
 			if (hsfsts.hsf_status.flcerr == 1)
-				/* repeat for some time before
-				 * giving up */
+				/* repeat for some time before giving up */
 				continue;
 			else if (hsfsts.hsf_status.flcdone == 0)
 				return ret_val;
@@ -1585,7 +1623,8 @@ static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
 
 	ret_val = e1000e_get_bus_info_pcie(hw);
 
-	/* ICH devices are "PCI Express"-ish.  They have
+	/*
+	 * ICH devices are "PCI Express"-ish.  They have
 	 * a configuration space, but do not contain
 	 * PCI Express Capability registers, so bus width
 	 * must be hardcoded.
@@ -1608,7 +1647,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
 	u32 ctrl, icr, kab;
 	s32 ret_val;
 
-	/* Prevent the PCI-E bus from sticking if there is no TLP connection
+	/*
+	 * Prevent the PCI-E bus from sticking if there is no TLP connection
 	 * on the last TLP read/write transaction when MAC is reset.
 	 */
 	ret_val = e1000e_disable_pcie_master(hw);
@@ -1619,7 +1659,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
 	hw_dbg(hw, "Masking off all interrupts\n");
 	ew32(IMC, 0xffffffff);
 
-	/* Disable the Transmit and Receive units.  Then delay to allow
+	/*
+	 * Disable the Transmit and Receive units.  Then delay to allow
 	 * any pending transactions to complete before we hit the MAC
 	 * with the global reset.
 	 */
@@ -1640,7 +1681,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
 	ctrl = er32(CTRL);
 
 	if (!e1000_check_reset_block(hw)) {
-		/* PHY HW reset requires MAC CORE reset at the same
+		/*
+		 * PHY HW reset requires MAC CORE reset at the same
 		 * time to make sure the interface between MAC and the
 		 * external PHY is reset.
 		 */
@@ -1711,21 +1753,23 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
 	ret_val = e1000_setup_link_ich8lan(hw);
 
 	/* Set the transmit descriptor write-back policy for both queues */
-	txdctl = er32(TXDCTL);
+	txdctl = er32(TXDCTL(0));
 	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
 		 E1000_TXDCTL_FULL_TX_DESC_WB;
 	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
 		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
-	ew32(TXDCTL, txdctl);
-	txdctl = er32(TXDCTL1);
+	ew32(TXDCTL(0), txdctl);
+	txdctl = er32(TXDCTL(1));
 	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
 		 E1000_TXDCTL_FULL_TX_DESC_WB;
 	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
 		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
-	ew32(TXDCTL1, txdctl);
+	ew32(TXDCTL(1), txdctl);
 
-	/* ICH8 has opposite polarity of no_snoop bits.
-	 * By default, we should use snoop behavior. */
+	/*
+	 * ICH8 has opposite polarity of no_snoop bits.
+	 * By default, we should use snoop behavior.
+	 */
 	if (mac->type == e1000_ich8lan)
 		snoop = PCIE_ICH8_SNOOP_ALL;
 	else
@@ -1736,7 +1780,8 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
 	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
 	ew32(CTRL_EXT, ctrl_ext);
 
-	/* Clear all of the statistics registers (clear on read).  It is
+	/*
+	 * Clear all of the statistics registers (clear on read).  It is
 	 * important that we do this after we have tried to establish link
 	 * because the symbol error count will increment wildly if there
 	 * is no link.
@@ -1762,30 +1807,30 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
 	ew32(CTRL_EXT, reg);
 
 	/* Transmit Descriptor Control 0 */
-	reg = er32(TXDCTL);
+	reg = er32(TXDCTL(0));
 	reg |= (1 << 22);
-	ew32(TXDCTL, reg);
+	ew32(TXDCTL(0), reg);
 
 	/* Transmit Descriptor Control 1 */
-	reg = er32(TXDCTL1);
+	reg = er32(TXDCTL(1));
 	reg |= (1 << 22);
-	ew32(TXDCTL1, reg);
+	ew32(TXDCTL(1), reg);
 
 	/* Transmit Arbitration Control 0 */
-	reg = er32(TARC0);
+	reg = er32(TARC(0));
 	if (hw->mac.type == e1000_ich8lan)
 		reg |= (1 << 28) | (1 << 29);
 	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
-	ew32(TARC0, reg);
+	ew32(TARC(0), reg);
 
 	/* Transmit Arbitration Control 1 */
-	reg = er32(TARC1);
+	reg = er32(TARC(1));
 	if (er32(TCTL) & E1000_TCTL_MULR)
 		reg &= ~(1 << 28);
 	else
 		reg |= (1 << 28);
 	reg |= (1 << 24) | (1 << 26) | (1 << 30);
-	ew32(TARC1, reg);
+	ew32(TARC(1), reg);
 
 	/* Device Status */
 	if (hw->mac.type == e1000_ich8lan) {
@@ -1807,29 +1852,29 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
  **/
 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
 {
-	struct e1000_mac_info *mac = &hw->mac;
 	s32 ret_val;
 
 	if (e1000_check_reset_block(hw))
 		return 0;
 
-	/* ICH parts do not have a word in the NVM to determine
+	/*
+	 * ICH parts do not have a word in the NVM to determine
 	 * the default flow control setting, so we explicitly
 	 * set it to full.
 	 */
-	if (mac->fc == e1000_fc_default)
-		mac->fc = e1000_fc_full;
+	if (hw->fc.type == e1000_fc_default)
+		hw->fc.type = e1000_fc_full;
 
-	mac->original_fc = mac->fc;
+	hw->fc.original_type = hw->fc.type;
 
-	hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", mac->fc);
+	hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", hw->fc.type);
 
 	/* Continue to configure the copper link. */
 	ret_val = e1000_setup_copper_link_ich8lan(hw);
 	if (ret_val)
 		return ret_val;
 
-	ew32(FCTTV, mac->fc_pause_time);
+	ew32(FCTTV, hw->fc.pause_time);
 
 	return e1000e_set_fc_watermarks(hw);
 }
@@ -1853,9 +1898,11 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
 	ew32(CTRL, ctrl);
 
-	/* Set the mac to wait the maximum time between each iteration
+	/*
+	 * Set the mac to wait the maximum time between each iteration
 	 * and increase the max iterations when polling the phy;
-	 * this fixes erroneous timeouts at 10Mbps. */
+	 * this fixes erroneous timeouts at 10Mbps.
+	 */
 	ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF);
 	if (ret_val)
 		return ret_val;
@@ -1882,7 +1929,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
  *  @speed: pointer to store current link speed
  *  @duplex: pointer to store the current link duplex
  *
- *  Calls the generic get_speed_and_duplex to retreive the current link
+ *  Calls the generic get_speed_and_duplex to retrieve the current link
  *  information and then calls the Kumeran lock loss workaround for links at
  *  gigabit speeds.
  **/
@@ -1930,9 +1977,11 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
 	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
 		return 0;
 
-	/* Make sure link is up before proceeding.  If not just return.
+	/*
+	 * Make sure link is up before proceeding.  If not just return.
 	 * Attempting this while link is negotiating fouled up link
-	 * stability */
+	 * stability
+	 */
 	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
 	if (!link)
 		return 0;
@@ -1961,8 +2010,10 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
 		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
 	ew32(PHY_CTRL, phy_ctrl);
 
-	/* Call gig speed drop workaround on Gig disable before accessing
-	 * any PHY registers */
+	/*
+	 * Call gig speed drop workaround on Gig disable before accessing
+	 * any PHY registers
+	 */
 	e1000e_gig_downshift_workaround_ich8lan(hw);
 
 	/* unable to acquire PCS lock */
@@ -1970,7 +2021,7 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
 }
 
 /**
- *  e1000_set_kmrn_lock_loss_workaound_ich8lan - Set Kumeran workaround state
+ *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
  *  @hw: pointer to the HW structure
  *  @state: boolean value used to set the current Kumeran workaround state
  *
@@ -2017,8 +2068,10 @@ void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
 			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
 		ew32(PHY_CTRL, reg);
 
-		/* Call gig speed drop workaround on Gig disable before
-		 * accessing any PHY registers */
+		/*
+		 * Call gig speed drop workaround on Gig disable before
+		 * accessing any PHY registers
+		 */
 		if (hw->mac.type == e1000_ich8lan)
 			e1000e_gig_downshift_workaround_ich8lan(hw);
 
@@ -2158,7 +2211,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
 	.get_link_up_info	= e1000_get_link_up_info_ich8lan,
 	.led_on			= e1000_led_on_ich8lan,
 	.led_off		= e1000_led_off_ich8lan,
-	.mc_addr_list_update	= e1000e_mc_addr_list_update_generic,
+	.update_mc_addr_list	= e1000e_update_mc_addr_list_generic,
 	.reset_hw		= e1000_reset_hw_ich8lan,
 	.init_hw		= e1000_init_hw_ich8lan,
 	.setup_link		= e1000_setup_link_ich8lan,
@@ -2200,7 +2253,7 @@ struct e1000_info e1000_ich8_info = {
 				  | FLAG_HAS_FLASH
 				  | FLAG_APME_IN_WUC,
 	.pba			= 8,
-	.get_invariants		= e1000_get_invariants_ich8lan,
+	.get_variants		= e1000_get_variants_ich8lan,
 	.mac_ops		= &ich8_mac_ops,
 	.phy_ops		= &ich8_phy_ops,
 	.nvm_ops		= &ich8_nvm_ops,
@@ -2217,7 +2270,7 @@ struct e1000_info e1000_ich9_info = {
 				  | FLAG_HAS_FLASH
 				  | FLAG_APME_IN_WUC,
 	.pba			= 10,
-	.get_invariants		= e1000_get_invariants_ich8lan,
+	.get_variants		= e1000_get_variants_ich8lan,
 	.mac_ops		= &ich8_mac_ops,
 	.phy_ops		= &ich8_phy_ops,
 	.nvm_ops		= &ich8_nvm_ops,

drivers/net/e1000e/lib.c  (+172 -176)

@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -43,8 +43,8 @@ enum e1000_mng_mode {
 
 #define E1000_FACTPS_MNGCG		0x20000000
 
-#define E1000_IAMT_SIGNATURE		0x544D4149 /* Intel(R) Active Management
-						    * Technology signature */
+/* Intel(R) Active Management Technology signature */
+#define E1000_IAMT_SIGNATURE		0x544D4149
 
 /**
  *  e1000e_get_bus_info_pcie - Get PCIe bus information
@@ -142,7 +142,8 @@ void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
 {
 	u32 rar_low, rar_high;
 
-	/* HW expects these in little endian so we reverse the byte order
+	/*
+	 * HW expects these in little endian so we reverse the byte order
 	 * from network order (big endian) to little endian
 	 */
 	rar_low = ((u32) addr[0] |
@@ -171,7 +172,8 @@ static void e1000_mta_set(struct e1000_hw *hw, u32 hash_value)
 {
 	u32 hash_bit, hash_reg, mta;
 
-	/* The MTA is a register array of 32-bit registers. It is
+	/*
+	 * The MTA is a register array of 32-bit registers. It is
 	 * treated like an array of (32*mta_reg_count) bits.  We want to
 	 * set bit BitArray[hash_value]. So we figure out what register
 	 * the bit is in, read it, OR in the new bit, then write
@@ -208,12 +210,15 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
 	/* Register count multiplied by bits per register */
 	hash_mask = (hw->mac.mta_reg_count * 32) - 1;
 
-	/* For a mc_filter_type of 0, bit_shift is the number of left-shifts
-	 * where 0xFF would still fall within the hash mask. */
+	/*
+	 * For a mc_filter_type of 0, bit_shift is the number of left-shifts
+	 * where 0xFF would still fall within the hash mask.
+	 */
 	while (hash_mask >> bit_shift != 0xFF)
 		bit_shift++;
 
-	/* The portion of the address that is used for the hash table
+	/*
+	 * The portion of the address that is used for the hash table
 	 * is determined by the mc_filter_type setting.
 	 * The algorithm is such that there is a total of 8 bits of shifting.
 	 * The bit_shift for a mc_filter_type of 0 represents the number of
@@ -224,8 +229,8 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
 	 * cases are a variation of this algorithm...essentially raising the
 	 * number of bits to shift mc_addr[5] left, while still keeping the
 	 * 8-bit shifting total.
-	 */
-	/* For example, given the following Destination MAC Address and an
+	 *
+	 * For example, given the following Destination MAC Address and an
 	 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
 	 * we can see that the bit_shift for case 0 is 4.  These are the hash
 	 * values resulting from each mc_filter_type...
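
The comment above walks through how bit_shift falls out of the MTA size; a small standalone check for the 128-register (4096-bit) table used in that example (illustrative, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned int mta_reg_count = 128;                  /* example value */
	unsigned int hash_mask = (mta_reg_count * 32) - 1; /* 0xFFF */
	unsigned int bit_shift = 0;

	/* count left-shifts until 0xFF still fits under the mask */
	while ((hash_mask >> bit_shift) != 0xFF)
		bit_shift++;

	/* prints hash_mask=0xFFF bit_shift=4, matching "case 0 is 4" above */
	printf("hash_mask=0x%X bit_shift=%u\n", hash_mask, bit_shift);
	return 0;
}
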
@@ -260,7 +265,7 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
 }
 
 /**
- *  e1000e_mc_addr_list_update_generic - Update Multicast addresses
+ *  e1000e_update_mc_addr_list_generic - Update Multicast addresses
  *  @hw: pointer to the HW structure
  *  @mc_addr_list: array of multicast addresses to program
  *  @mc_addr_count: number of multicast addresses to program
@@ -272,14 +277,15 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
  *  The parameter rar_count will usually be hw->mac.rar_entry_count
  *  unless there are workarounds that change this.
  **/
-void e1000e_mc_addr_list_update_generic(struct e1000_hw *hw,
-				       u8 *mc_addr_list, u32 mc_addr_count,
-				       u32 rar_used_count, u32 rar_count)
+void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
+					u8 *mc_addr_list, u32 mc_addr_count,
+					u32 rar_used_count, u32 rar_count)
 {
 	u32 hash_value;
 	u32 i;
 
-	/* Load the first set of multicast addresses into the exact
+	/*
+	 * Load the first set of multicast addresses into the exact
 	 * filters (RAR).  If there are not enough to fill the RAR
 	 * array, clear the filters.
 	 */
@@ -375,7 +381,8 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
 	s32 ret_val;
 	bool link;
 
-	/* We only want to go out to the PHY registers to see if Auto-Neg
+	/*
+	 * We only want to go out to the PHY registers to see if Auto-Neg
 	 * has completed and/or if our link status has changed.  The
 	 * get_link_status flag is set upon receiving a Link Status
 	 * Change or Rx Sequence Error interrupt.
@@ -383,7 +390,8 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
 	if (!mac->get_link_status)
 		return 0;
 
-	/* First we want to see if the MII Status Register reports
+	/*
+	 * First we want to see if the MII Status Register reports
 	 * link.  If so, then we want to get the current speed/duplex
 	 * of the PHY.
 	 */
@@ -396,11 +404,14 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
 
 	mac->get_link_status = 0;
 
-	/* Check if there was DownShift, must be checked
-	 * immediately after link-up */
+	/*
+	 * Check if there was DownShift, must be checked
+	 * immediately after link-up
+	 */
 	e1000e_check_downshift(hw);
 
-	/* If we are forcing speed/duplex, then we simply return since
+	/*
+	 * If we are forcing speed/duplex, then we simply return since
 	 * we have already determined whether we have link or not.
 	 */
 	if (!mac->autoneg) {
@@ -408,13 +419,15 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
 		return ret_val;
 	}
 
-	/* Auto-Neg is enabled.  Auto Speed Detection takes care
+	/*
+	 * Auto-Neg is enabled.  Auto Speed Detection takes care
 	 * of MAC speed/duplex configuration.  So we only need to
 	 * configure Collision Distance in the MAC.
 	 */
 	e1000e_config_collision_dist(hw);
 
-	/* Configure Flow Control now that Auto-Neg has completed.
+	/*
+	 * Configure Flow Control now that Auto-Neg has completed.
 	 * First, we need to restore the desired flow control
 	 * settings because we may have had to re-autoneg with a
 	 * different link partner.
@@ -446,7 +459,8 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
 	status = er32(STATUS);
 	rxcw = er32(RXCW);
 
-	/* If we don't have link (auto-negotiation failed or link partner
+	/*
+	 * If we don't have link (auto-negotiation failed or link partner
 	 * cannot auto-negotiate), the cable is plugged in (we have signal),
 	 * and our link partner is not trying to auto-negotiate with us (we
 	 * are receiving idles or data), we need to force link up. We also
@@ -477,7 +491,8 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
 			return ret_val;
 		}
 	} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
-		/* If we are forcing link and we are receiving /C/ ordered
+		/*
+		 * If we are forcing link and we are receiving /C/ ordered
 		 * sets, re-enable auto-negotiation in the TXCW register
 		 * and disable forced link in the Device Control register
 		 * in an attempt to auto-negotiate with our link partner.
@@ -511,7 +526,8 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
 	status = er32(STATUS);
 	rxcw = er32(RXCW);
 
-	/* If we don't have link (auto-negotiation failed or link partner
+	/*
+	 * If we don't have link (auto-negotiation failed or link partner
 	 * cannot auto-negotiate), and our link partner is not trying to
 	 * auto-negotiate with us (we are receiving idles or data),
 	 * we need to force link up. We also need to give auto-negotiation
@@ -540,7 +556,8 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
 			return ret_val;
 		}
 	} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
-		/* If we are forcing link and we are receiving /C/ ordered
+		/*
+		 * If we are forcing link and we are receiving /C/ ordered
 		 * sets, re-enable auto-negotiation in the TXCW register
 		 * and disable forced link in the Device Control register
 		 * in an attempt to auto-negotiate with our link partner.
@@ -551,7 +568,8 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
 
 		mac->serdes_has_link = 1;
 	} else if (!(E1000_TXCW_ANE & er32(TXCW))) {
-		/* If we force link for non-auto-negotiation switch, check
+		/*
+		 * If we force link for non-auto-negotiation switch, check
 		 * link status based on MAC synchronization for internal
 		 * serdes media type.
 		 */
@@ -585,11 +603,11 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
  **/
 static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
 {
-	struct e1000_mac_info *mac = &hw->mac;
 	s32 ret_val;
 	u16 nvm_data;
 
-	/* Read and store word 0x0F of the EEPROM. This word contains bits
+	/*
+	 * Read and store word 0x0F of the EEPROM. This word contains bits
 	 * that determine the hardware's default PAUSE (flow control) mode,
 	 * a bit that determines whether the HW defaults to enabling or
 	 * disabling auto-negotiation, and the direction of the
@@ -605,12 +623,12 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
 	}
 
 	if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
-		mac->fc = e1000_fc_none;
+		hw->fc.type = e1000_fc_none;
 	else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
 		 NVM_WORD0F_ASM_DIR)
-		mac->fc = e1000_fc_tx_pause;
+		hw->fc.type = e1000_fc_tx_pause;
 	else
-		mac->fc = e1000_fc_full;
+		hw->fc.type = e1000_fc_full;
 
 	return 0;
 }
@@ -630,7 +648,8 @@ s32 e1000e_setup_link(struct e1000_hw *hw)
 	struct e1000_mac_info *mac = &hw->mac;
 	s32 ret_val;
 
-	/* In the case of the phy reset being blocked, we already have a link.
+	/*
+	 * In the case of the phy reset being blocked, we already have a link.
 	 * We do not need to set it up again.
 	 */
 	if (e1000_check_reset_block(hw))
@@ -640,26 +659,28 @@ s32 e1000e_setup_link(struct e1000_hw *hw)
 	 * If flow control is set to default, set flow control based on
 	 * the EEPROM flow control settings.
 	 */
-	if (mac->fc == e1000_fc_default) {
+	if (hw->fc.type == e1000_fc_default) {
 		ret_val = e1000_set_default_fc_generic(hw);
 		if (ret_val)
 			return ret_val;
 	}
 
-	/* We want to save off the original Flow Control configuration just
+	/*
+	 * We want to save off the original Flow Control configuration just
 	 * in case we get disconnected and then reconnected into a different
 	 * hub or switch with different Flow Control capabilities.
 	 */
-	mac->original_fc = mac->fc;
+	hw->fc.original_type = hw->fc.type;
 
-	hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", mac->fc);
+	hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", hw->fc.type);
 
 	/* Call the necessary media_type subroutine to configure the link. */
 	ret_val = mac->ops.setup_physical_interface(hw);
 	if (ret_val)
 		return ret_val;
 
-	/* Initialize the flow control address, type, and PAUSE timer
+	/*
+	 * Initialize the flow control address, type, and PAUSE timer
 	 * registers to their default values.  This is done even if flow
 	 * control is disabled, because it does not hurt anything to
 	 * initialize these registers.
@@ -669,7 +690,7 @@ s32 e1000e_setup_link(struct e1000_hw *hw)
 	ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
 	ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
 
-	ew32(FCTTV, mac->fc_pause_time);
+	ew32(FCTTV, hw->fc.pause_time);
 
 	return e1000e_set_fc_watermarks(hw);
 }
@@ -686,7 +707,8 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
 	struct e1000_mac_info *mac = &hw->mac;
 	u32 txcw;
 
-	/* Check for a software override of the flow control settings, and
+	/*
+	 * Check for a software override of the flow control settings, and
 	 * setup the device accordingly.  If auto-negotiation is enabled, then
 	 * software will have to set the "PAUSE" bits to the correct value in
 	 * the Transmit Config Word Register (TXCW) and re-start auto-
@@ -700,31 +722,34 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
 	 *	  but not send pause frames).
 	 *      2:  Tx flow control is enabled (we can send pause frames but we
 	 *	  do not support receiving pause frames).
-	 *      3:  Both Rx and TX flow control (symmetric) are enabled.
+	 *      3:  Both Rx and Tx flow control (symmetric) are enabled.
 	 */
-	switch (mac->fc) {
+	switch (hw->fc.type) {
 	case e1000_fc_none:
 		/* Flow control completely disabled by a software over-ride. */
 		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
 		break;
 	case e1000_fc_rx_pause:
-		/* RX Flow control is enabled and TX Flow control is disabled
+		/*
+		 * Rx Flow control is enabled and Tx Flow control is disabled
 		 * by a software over-ride. Since there really isn't a way to
-		 * advertise that we are capable of RX Pause ONLY, we will
-		 * advertise that we support both symmetric and asymmetric RX
+		 * advertise that we are capable of Rx Pause ONLY, we will
+		 * advertise that we support both symmetric and asymmetric Rx
 		 * PAUSE.  Later, we will disable the adapter's ability to send
 		 * PAUSE frames.
 		 */
 		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
 		break;
 	case e1000_fc_tx_pause:
-		/* TX Flow control is enabled, and RX Flow control is disabled,
+		/*
+		 * Tx Flow control is enabled, and Rx Flow control is disabled,
 		 * by a software over-ride.
 		 */
 		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
 		break;
 	case e1000_fc_full:
-		/* Flow control (both RX and TX) is enabled by a software
+		/*
+		 * Flow control (both Rx and Tx) is enabled by a software
 		 * over-ride.
 		 */
 		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
@@ -754,7 +779,8 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
 	u32 i, status;
 	s32 ret_val;
 
-	/* If we have a signal (the cable is plugged in, or assumed true for
+	/*
+	 * If we have a signal (the cable is plugged in, or assumed true for
 	 * serdes media) then poll for a "Link-Up" indication in the Device
 	 * Status Register.  Time-out if a link isn't seen in 500
 	 * milliseconds (Auto-negotiation should complete in less than 500
@@ -769,7 +795,8 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
 	if (i == FIBER_LINK_UP_LIMIT) {
 		hw_dbg(hw, "Never got a valid link from auto-neg!!!\n");
 		mac->autoneg_failed = 1;
-		/* AutoNeg failed to achieve a link, so we'll call
+		/*
+		 * AutoNeg failed to achieve a link, so we'll call
 		 * mac->check_for_link. This routine will force the
 		 * link up if we detect a signal. This will allow us to
 		 * communicate with non-autonegotiating link partners.
@@ -811,7 +838,8 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
 	if (ret_val)
 		return ret_val;
 
-	/* Since auto-negotiation is enabled, take the link out of reset (the
+	/*
+	 * Since auto-negotiation is enabled, take the link out of reset (the
 	 * link will be in reset, because we previously reset the chip). This
 	 * will restart auto-negotiation.  If auto-negotiation is successful
 	 * then the link-up status bit will be set and the flow control enable
@@ -823,11 +851,12 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
 	e1e_flush();
 	msleep(1);
 
-	/* For these adapters, the SW defineable pin 1 is set when the optics
+	/*
+	 * For these adapters, the SW definable pin 1 is set when the optics
 	 * detect a signal.  If we have a signal, then poll for a "Link-Up"
 	 * indication.
 	 */
-	if (hw->media_type == e1000_media_type_internal_serdes ||
+	if (hw->phy.media_type == e1000_media_type_internal_serdes ||
 	    (er32(CTRL) & E1000_CTRL_SWDPIN1)) {
 		ret_val = e1000_poll_fiber_serdes_link_generic(hw);
 	} else {
@@ -864,27 +893,28 @@ void e1000e_config_collision_dist(struct e1000_hw *hw)
  *
  *  Sets the flow control high/low threshold (watermark) registers.  If
  *  flow control XON frame transmission is enabled, then set XON frame
- *  tansmission as well.
+ *  transmission as well.
  **/
 s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
 {
-	struct e1000_mac_info *mac = &hw->mac;
 	u32 fcrtl = 0, fcrth = 0;
 
-	/* Set the flow control receive threshold registers.  Normally,
+	/*
+	 * Set the flow control receive threshold registers.  Normally,
 	 * these registers will be set to a default threshold that may be
 	 * adjusted later by the driver's runtime code.  However, if the
 	 * ability to transmit pause frames is not enabled, then these
 	 * registers will be set to 0.
 	 */
-	if (mac->fc & e1000_fc_tx_pause) {
-		/* We need to set up the Receive Threshold high and low water
+	if (hw->fc.type & e1000_fc_tx_pause) {
+		/*
+		 * We need to set up the Receive Threshold high and low water
 		 * marks as well as (optionally) enabling the transmission of
 		 * XON frames.
 		 */
-		fcrtl = mac->fc_low_water;
+		fcrtl = hw->fc.low_water;
 		fcrtl |= E1000_FCRTL_XONE;
-		fcrth = mac->fc_high_water;
+		fcrth = hw->fc.high_water;
 	}
 	ew32(FCRTL, fcrtl);
 	ew32(FCRTH, fcrth);
@@ -904,18 +934,18 @@ s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
  **/
 s32 e1000e_force_mac_fc(struct e1000_hw *hw)
 {
-	struct e1000_mac_info *mac = &hw->mac;
 	u32 ctrl;
 
 	ctrl = er32(CTRL);
 
-	/* Because we didn't get link via the internal auto-negotiation
+	/*
+	 * Because we didn't get link via the internal auto-negotiation
 	 * mechanism (we either forced link or we got link via PHY
 	 * auto-neg), we have to manually enable/disable transmit and
 	 * receive flow control.
 	 *
 	 * The "Case" statement below enables/disable flow control
-	 * according to the "mac->fc" parameter.
+	 * according to the "hw->fc.type" parameter.
 	 *
 	 * The possible values of the "fc" parameter are:
 	 *      0:  Flow control is completely disabled
@@ -923,12 +953,12 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw)
 	 *	  frames but not send pause frames).
 	 *      2:  Tx flow control is enabled (we can send pause frames
 	 *	  frames but we do not receive pause frames).
-	 *      3:  Both Rx and TX flow control (symmetric) is enabled.
+	 *      3:  Both Rx and Tx flow control (symmetric) is enabled.
 	 *  other:  No other values should be possible at this point.
 	 */
-	hw_dbg(hw, "mac->fc = %u\n", mac->fc);
+	hw_dbg(hw, "hw->fc.type = %u\n", hw->fc.type);
 
-	switch (mac->fc) {
+	switch (hw->fc.type) {
 	case e1000_fc_none:
 		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
 		break;
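
Only the e1000_fc_none case of this switch falls inside the hunk; for reference, a hedged sketch of how the remaining types map onto the CTRL register's receive/transmit flow-control enables, written from the numbered comment above rather than copied from the driver:

/* Illustrative helper: hw->fc.type -> CTRL flow-control enable bits.
 * RFCE honours received PAUSE frames, TFCE allows sending them. */
static u32 fc_type_to_ctrl_sketch(enum e1000_fc_type type, u32 ctrl)
{
	ctrl &= ~(E1000_CTRL_TFCE | E1000_CTRL_RFCE);

	switch (type) {
	case e1000_fc_rx_pause:		/* value 1 above */
		ctrl |= E1000_CTRL_RFCE;
		break;
	case e1000_fc_tx_pause:		/* value 2 above */
		ctrl |= E1000_CTRL_TFCE;
		break;
	case e1000_fc_full:		/* value 3 above */
		ctrl |= E1000_CTRL_TFCE | E1000_CTRL_RFCE;
		break;
	case e1000_fc_none:		/* value 0: both stay cleared */
	default:
		break;
	}
	return ctrl;
}
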
@@ -970,16 +1000,17 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
 	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
 	u16 speed, duplex;
 
-	/* Check for the case where we have fiber media and auto-neg failed
+	/*
+	 * Check for the case where we have fiber media and auto-neg failed
 	 * so we had to force link.  In this case, we need to force the
 	 * configuration of the MAC to match the "fc" parameter.
 	 */
 	if (mac->autoneg_failed) {
-		if (hw->media_type == e1000_media_type_fiber ||
-		    hw->media_type == e1000_media_type_internal_serdes)
+		if (hw->phy.media_type == e1000_media_type_fiber ||
+		    hw->phy.media_type == e1000_media_type_internal_serdes)
 			ret_val = e1000e_force_mac_fc(hw);
 	} else {
-		if (hw->media_type == e1000_media_type_copper)
+		if (hw->phy.media_type == e1000_media_type_copper)
 			ret_val = e1000e_force_mac_fc(hw);
 	}
 
@@ -988,13 +1019,15 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
 		return ret_val;
 	}
 
-	/* Check for the case where we have copper media and auto-neg is
+	/*
+	 * Check for the case where we have copper media and auto-neg is
 	 * enabled.  In this case, we need to check and see if Auto-Neg
 	 * has completed, and if so, how the PHY and link partner has
 	 * flow control configured.
 	 */
-	if ((hw->media_type == e1000_media_type_copper) && mac->autoneg) {
-		/* Read the MII Status Register and check to see if AutoNeg
+	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
+		/*
+		 * Read the MII Status Register and check to see if AutoNeg
 		 * has completed.  We read this twice because this reg has
 		 * some "sticky" (latched) bits.
 		 */
@@ -1011,7 +1044,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
 			return ret_val;
 		}
 
-		/* The AutoNeg process has completed, so we now need to
+		/*
+		 * The AutoNeg process has completed, so we now need to
 		 * read both the Auto Negotiation Advertisement
 		 * Register (Address 4) and the Auto_Negotiation Base
 		 * Page Ability Register (Address 5) to determine how
@@ -1024,7 +1058,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
 		if (ret_val)
 			return ret_val;
 
-		/* Two bits in the Auto Negotiation Advertisement Register
+		/*
+		 * Two bits in the Auto Negotiation Advertisement Register
 		 * (Address 4) and two bits in the Auto Negotiation Base
 		 * Page Ability Register (Address 5) determine flow control
 		 * for both the PHY and the link partner.  The following
@@ -1045,8 +1080,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
 		 *   1   |    1    |   0   |    0    | e1000_fc_none
 		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
 		 *
-		 */
-		/* Are both PAUSE bits set to 1?  If so, this implies
+		 *
+		 * Are both PAUSE bits set to 1?  If so, this implies
 		 * Symmetric Flow Control is enabled at both ends.  The
 		 * ASM_DIR bits are irrelevant per the spec.
 		 *
@@ -1060,22 +1095,24 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
 		 */
 		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
 		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
-			/* Now we need to check if the user selected RX ONLY
+			/*
+			 * Now we need to check if the user selected Rx ONLY
 			 * of pause frames.  In this case, we had to advertise
-			 * FULL flow control because we could not advertise RX
+			 * FULL flow control because we could not advertise Rx
 			 * ONLY. Hence, we must now check to see if we need to
 			 * turn OFF  the TRANSMISSION of PAUSE frames.
 			 */
-			if (mac->original_fc == e1000_fc_full) {
-				mac->fc = e1000_fc_full;
+			if (hw->fc.original_type == e1000_fc_full) {
+				hw->fc.type = e1000_fc_full;
 				hw_dbg(hw, "Flow Control = FULL.\r\n");
 			} else {
-				mac->fc = e1000_fc_rx_pause;
+				hw->fc.type = e1000_fc_rx_pause;
 				hw_dbg(hw, "Flow Control = "
 					 "RX PAUSE frames only.\r\n");
 			}
 		}
-		/* For receiving PAUSE frames ONLY.
+		/*
+		 * For receiving PAUSE frames ONLY.
 		 *
 		 *   LOCAL DEVICE  |   LINK PARTNER
 		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
@@ -1087,10 +1124,11 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
 			  (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
 			  (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
 			  (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
-			mac->fc = e1000_fc_tx_pause;
-			hw_dbg(hw, "Flow Control = TX PAUSE frames only.\r\n");
+			hw->fc.type = e1000_fc_tx_pause;
+			hw_dbg(hw, "Flow Control = Tx PAUSE frames only.\r\n");
 		}
-		/* For transmitting PAUSE frames ONLY.
+		/*
+		 * For transmitting PAUSE frames ONLY.
 		 *
 		 *   LOCAL DEVICE  |   LINK PARTNER
 		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
@@ -1102,18 +1140,19 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
 			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
 			 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
 			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
-			mac->fc = e1000_fc_rx_pause;
-			hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n");
+			hw->fc.type = e1000_fc_rx_pause;
+			hw_dbg(hw, "Flow Control = Rx PAUSE frames only.\r\n");
 		} else {
 			/*
 			 * Per the IEEE spec, at this point flow control
 			 * should be disabled.
 			 */
-			mac->fc = e1000_fc_none;
+			hw->fc.type = e1000_fc_none;
 			hw_dbg(hw, "Flow Control = NONE.\r\n");
 		}
 
-		/* Now we need to do one last check...  If we auto-
+		/*
+		 * Now we need to do one last check...  If we auto-
 		 * negotiated to HALF DUPLEX, flow control should not be
 		 * enabled per IEEE 802.3 spec.
 		 */
@@ -1124,9 +1163,10 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
 		}
 
 		if (duplex == HALF_DUPLEX)
-			mac->fc = e1000_fc_none;
+			hw->fc.type = e1000_fc_none;
 
-		/* Now we call a subroutine to actually force the MAC
+		/*
+		 * Now we call a subroutine to actually force the MAC
 		 * controller to use the correct flow control settings.
 		 */
 		ret_val = e1000e_force_mac_fc(hw);
@@ -1393,13 +1433,15 @@ s32 e1000e_blink_led(struct e1000_hw *hw)
 	u32 ledctl_blink = 0;
 	u32 i;
 
-	if (hw->media_type == e1000_media_type_fiber) {
+	if (hw->phy.media_type == e1000_media_type_fiber) {
 		/* always blink LED0 for PCI-E fiber */
 		ledctl_blink = E1000_LEDCTL_LED0_BLINK |
 		     (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
 	} else {
-		/* set the blink bit for each LED that's "on" (0x0E)
-		 * in ledctl_mode2 */
+		/*
+		 * set the blink bit for each LED that's "on" (0x0E)
+		 * in ledctl_mode2
+		 */
 		ledctl_blink = hw->mac.ledctl_mode2;
 		for (i = 0; i < 4; i++)
 			if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
@@ -1423,7 +1465,7 @@ s32 e1000e_led_on_generic(struct e1000_hw *hw)
 {
 	u32 ctrl;
 
-	switch (hw->media_type) {
+	switch (hw->phy.media_type) {
 	case e1000_media_type_fiber:
 		ctrl = er32(CTRL);
 		ctrl &= ~E1000_CTRL_SWDPIN0;
@@ -1450,7 +1492,7 @@ s32 e1000e_led_off_generic(struct e1000_hw *hw)
 {
 	u32 ctrl;
 
-	switch (hw->media_type) {
+	switch (hw->phy.media_type) {
 	case e1000_media_type_fiber:
 		ctrl = er32(CTRL);
 		ctrl |= E1000_CTRL_SWDPIN0;
@@ -1562,8 +1604,7 @@ void e1000e_update_adaptive(struct e1000_hw *hw)
 				else
 					mac->current_ifs_val +=
 						mac->ifs_step_size;
-				ew32(AIT,
-						mac->current_ifs_val);
+				ew32(AIT, mac->current_ifs_val);
 			}
 		}
 	} else {
@@ -1826,10 +1867,12 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
 		udelay(1);
 		timeout = NVM_MAX_RETRY_SPI;
 
-		/* Read "Status Register" repeatedly until the LSB is cleared.
+		/*
+		 * Read "Status Register" repeatedly until the LSB is cleared.
 		 * The EEPROM will signal that the command has been completed
 		 * by clearing bit 0 of the internal status register.  If it's
-		 * not cleared within 'timeout', then error out. */
+		 * not cleared within 'timeout', then error out.
+		 */
 		while (timeout) {
 			e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
 						 hw->nvm.opcode_bits);
@@ -1851,62 +1894,6 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
 	return 0;
 }
 
-/**
- *  e1000e_read_nvm_spi - Reads EEPROM using SPI
- *  @hw: pointer to the HW structure
- *  @offset: offset of word in the EEPROM to read
- *  @words: number of words to read
- *  @data: word read from the EEPROM
- *
- *  Reads a 16 bit word from the EEPROM.
- **/
-s32 e1000e_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
-{
-	struct e1000_nvm_info *nvm = &hw->nvm;
-	u32 i = 0;
-	s32 ret_val;
-	u16 word_in;
-	u8 read_opcode = NVM_READ_OPCODE_SPI;
-
-	/* A check for invalid values:  offset too large, too many words,
-	 * and not enough words. */
-	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
-	    (words == 0)) {
-		hw_dbg(hw, "nvm parameter(s) out of bounds\n");
-		return -E1000_ERR_NVM;
-	}
-
-	ret_val = nvm->ops.acquire_nvm(hw);
-	if (ret_val)
-		return ret_val;
-
-	ret_val = e1000_ready_nvm_eeprom(hw);
-	if (ret_val) {
-		nvm->ops.release_nvm(hw);
-		return ret_val;
-	}
-
-	e1000_standby_nvm(hw);
-
-	if ((nvm->address_bits == 8) && (offset >= 128))
-		read_opcode |= NVM_A8_OPCODE_SPI;
-
-	/* Send the READ command (opcode + addr) */
-	e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
-	e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
-
-	/* Read the data.  SPI NVMs increment the address with each byte
-	 * read and will roll over if reading beyond the end.  This allows
-	 * us to read the whole NVM from any offset */
-	for (i = 0; i < words; i++) {
-		word_in = e1000_shift_in_eec_bits(hw, 16);
-		data[i] = (word_in >> 8) | (word_in << 8);
-	}
-
-	nvm->ops.release_nvm(hw);
-	return 0;
-}
-
 /**
  *  e1000e_read_nvm_eerd - Reads EEPROM using EERD register
  *  @hw: pointer to the HW structure
@@ -1922,8 +1909,10 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 	u32 i, eerd = 0;
 	s32 ret_val = 0;
 
-	/* A check for invalid values:  offset too large, too many words,
-	 * and not enough words. */
+	/*
+	 * A check for invalid values:  offset too large, too many words
+	 * for the offset, and not enough words.
+	 */
 	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
 	    (words == 0)) {
 		hw_dbg(hw, "nvm parameter(s) out of bounds\n");
@@ -1939,8 +1928,7 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 		if (ret_val)
 			break;
 
-		data[i] = (er32(EERD) >>
-			   E1000_NVM_RW_REG_DATA);
+		data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA);
 	}
 
 	return ret_val;
@@ -1964,8 +1952,10 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 	s32 ret_val;
 	u16 widx = 0;
 
-	/* A check for invalid values:  offset too large, too many words,
-	 * and not enough words. */
+	/*
+	 * A check for invalid values:  offset too large, too many words,
+	 * and not enough words.
+	 */
 	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
 	    (words == 0)) {
 		hw_dbg(hw, "nvm parameter(s) out of bounds\n");
@@ -1995,8 +1985,10 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 
 		e1000_standby_nvm(hw);
 
-		/* Some SPI eeproms use the 8th address bit embedded in the
-		 * opcode */
+		/*
+		 * Some SPI eeproms use the 8th address bit embedded in the
+		 * opcode
+		 */
 		if ((nvm->address_bits == 8) && (offset >= 128))
 			write_opcode |= NVM_A8_OPCODE_SPI;
 
@@ -2041,9 +2033,9 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw)
 		/* Check for an alternate MAC address.  An alternate MAC
 		 * address can be setup by pre-boot software and must be
 		 * treated like a permanent address and must override the
-		 * actual permanent MAC address. */
+		 * actual permanent MAC address.*/
 		ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
-						&mac_addr_offset);
+					 &mac_addr_offset);
 		if (ret_val) {
 			hw_dbg(hw, "NVM Read Error\n");
 			return ret_val;
@@ -2056,7 +2048,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw)
 				mac_addr_offset += ETH_ALEN/sizeof(u16);
 
 			/* make sure we have a valid mac address here
-			 * before using it */
+			* before using it */
 			ret_val = e1000_read_nvm(hw, mac_addr_offset, 1,
 						 &nvm_data);
 			if (ret_val) {
@@ -2068,7 +2060,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw)
 		}
 
 		if (mac_addr_offset)
-			hw->dev_spec.e82571.alt_mac_addr_is_present = 1;
+		hw->dev_spec.e82571.alt_mac_addr_is_present = 1;
 	}
 
 	for (i = 0; i < ETH_ALEN; i += 2) {
@@ -2244,7 +2236,7 @@ bool e1000e_check_mng_mode(struct e1000_hw *hw)
 }
 
 /**
- *  e1000e_enable_tx_pkt_filtering - Enable packet filtering on TX
+ *  e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx
  *  @hw: pointer to the HW structure
  *
  *  Enables packet filtering on transmit packets if manageability is enabled
@@ -2264,7 +2256,8 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
 		return 0;
 	}
 
-	/* If we can't read from the host interface for whatever
+	/*
+	 * If we can't read from the host interface for whatever
 	 * reason, disable filtering.
 	 */
 	ret_val = e1000_mng_enable_host_if(hw);
@@ -2282,7 +2275,8 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
 	hdr->checksum = 0;
 	csum = e1000_calculate_checksum((u8 *)hdr,
 					E1000_MNG_DHCP_COOKIE_LENGTH);
-	/* If either the checksums or signature don't match, then
+	/*
+	 * If either the checksums or signature don't match, then
 	 * the cookie area isn't considered valid, in which case we
 	 * take the safe route of assuming Tx filtering is enabled.
 	 */
@@ -2374,8 +2368,10 @@ static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer,
 	/* Calculate length in DWORDs */
 	length >>= 2;
 
-	/* The device driver writes the relevant command block into the
-	 * ram area. */
+	/*
+	 * The device driver writes the relevant command block into the
+	 * ram area.
+	 */
 	for (i = 0; i < length; i++) {
 		for (j = 0; j < sizeof(u32); j++) {
 			*(tmp + j) = *bufptr++;
@@ -2481,7 +2477,7 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
 	return ret_val;
 }
 
-s32 e1000e_read_part_num(struct e1000_hw *hw, u32 *part_num)
+s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num)
 {
 	s32 ret_val;
 	u16 nvm_data;
@@ -2491,14 +2487,14 @@ s32 e1000e_read_part_num(struct e1000_hw *hw, u32 *part_num)
 		hw_dbg(hw, "NVM Read Error\n");
 		return ret_val;
 	}
-	*part_num = (u32)(nvm_data << 16);
+	*pba_num = (u32)(nvm_data << 16);
 
 	ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
 	if (ret_val) {
 		hw_dbg(hw, "NVM Read Error\n");
 		return ret_val;
 	}
-	*part_num |= nvm_data;
+	*pba_num |= nvm_data;
 
 	return 0;
 }
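
The renamed helper packs the board's PBA (printed board assembly) number into a single
32-bit value, high NVM word in the upper 16 bits.  A minimal usage sketch, not part of
the patch and assuming only a valid struct e1000_hw *hw, split the same way
e1000_print_device_info() formats it further down in netdev.c:

	u32 pba_num;

	if (!e1000e_read_pba_num(hw, &pba_num))
		/* upper 24 bits, then lower 8 bits of the PBA number */
		printk(KERN_INFO "PBA No: %06x-%03x\n",
		       pba_num >> 8, pba_num & 0xff);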

+ 403 - 242
drivers/net/e1000e/netdev.c

@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -82,7 +82,7 @@ static int e1000_desc_unused(struct e1000_ring *ring)
 }
 
 /**
- * e1000_receive_skb - helper function to handle rx indications
+ * e1000_receive_skb - helper function to handle Rx indications
  * @adapter: board private structure
  * @status: descriptor status field as written by hardware
  * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
@@ -138,8 +138,9 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
 		/* TCP checksum is good */
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	} else {
-		/* IP fragment with UDP payload */
-		/* Hardware complements the payload checksum, so we undo it
+		/*
+		 * IP fragment with UDP payload
+		 * Hardware complements the payload checksum, so we undo it
 		 * and then put the value in host order for further stack use.
 		 */
 		__sum16 sum = (__force __sum16)htons(csum);
@@ -182,7 +183,8 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 			break;
 		}
 
-		/* Make buffer alignment 2 beyond a 16 byte boundary
+		/*
+		 * Make buffer alignment 2 beyond a 16 byte boundary
 		 * this will result in a 16 byte aligned IP header after
 		 * the 14 byte MAC header is removed
 		 */
@@ -213,10 +215,12 @@ map_skb:
 		if (i-- == 0)
 			i = (rx_ring->count - 1);
 
-		/* Force memory writes to complete before letting h/w
+		/*
+		 * Force memory writes to complete before letting h/w
 		 * know there are new descriptors to fetch.  (Only
 		 * applicable for weak-ordered memory model archs,
-		 * such as IA-64). */
+		 * such as IA-64).
+		 */
 		wmb();
 		writel(i, adapter->hw.hw_addr + rx_ring->tail);
 	}
@@ -285,7 +289,8 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 			break;
 		}
 
-		/* Make buffer alignment 2 beyond a 16 byte boundary
+		/*
+		 * Make buffer alignment 2 beyond a 16 byte boundary
 		 * this will result in a 16 byte aligned IP header after
 		 * the 14 byte MAC header is removed
 		 */
@@ -319,12 +324,15 @@ no_buffers:
 		if (!(i--))
 			i = (rx_ring->count - 1);
 
-		/* Force memory writes to complete before letting h/w
+		/*
+		 * Force memory writes to complete before letting h/w
 		 * know there are new descriptors to fetch.  (Only
 		 * applicable for weak-ordered memory model archs,
-		 * such as IA-64). */
+		 * such as IA-64).
+		 */
 		wmb();
-		/* Hardware increments by 16 bytes, but packet split
+		/*
+		 * Hardware increments by 16 bytes, but packet split
 		 * descriptors are 32 bytes...so we increment tail
 		 * twice as much.
 		 */
@@ -409,9 +417,11 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		total_rx_bytes += length;
 		total_rx_packets++;
 
-		/* code added for copybreak, this should improve
+		/*
+		 * code added for copybreak, this should improve
 		 * performance for small packets with large amounts
-		 * of reassembly being done in the stack */
+		 * of reassembly being done in the stack
+		 */
 		if (length < copybreak) {
 			struct sk_buff *new_skb =
 			    netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
@@ -581,14 +591,15 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
 	}
 
 	if (adapter->detect_tx_hung) {
-		/* Detect a transmit hang in hardware, this serializes the
-		 * check with the clearing of time_stamp and movement of i */
+		/*
+		 * Detect a transmit hang in hardware, this serializes the
+		 * check with the clearing of time_stamp and movement of i
+		 */
 		adapter->detect_tx_hung = 0;
 		if (tx_ring->buffer_info[eop].dma &&
 		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp
 			       + (adapter->tx_timeout_factor * HZ))
-		    && !(er32(STATUS) &
-			 E1000_STATUS_TXOFF)) {
+		    && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
 			e1000_print_tx_hang(adapter);
 			netif_stop_queue(netdev);
 		}
@@ -677,21 +688,28 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 		skb_put(skb, length);
 
 		{
-		/* this looks ugly, but it seems compiler issues make it
-		   more efficient than reusing j */
+		/*
+		 * this looks ugly, but it seems compiler issues make it
+		 * more efficient than reusing j
+		 */
 		int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
 
-		/* page alloc/put takes too long and effects small packet
-		 * throughput, so unsplit small packets and save the alloc/put*/
+		/*
+		 * page alloc/put takes too long and affects small packet
+		 * throughput, so unsplit small packets and save the alloc/put;
+		 * kmap_* may only be called from softirq (napi) context
+		 */
 		if (l1 && (l1 <= copybreak) &&
 		    ((length + l1) <= adapter->rx_ps_bsize0)) {
 			u8 *vaddr;
 
 			ps_page = &buffer_info->ps_pages[0];
 
-			/* there is no documentation about how to call
+			/*
+			 * there is no documentation about how to call
 			 * kmap_atomic, so we can't hold the mapping
-			 * very long */
+			 * very long
+			 */
 			pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
 				PAGE_SIZE, PCI_DMA_FROMDEVICE);
 			vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
@@ -836,26 +854,31 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 icr = er32(ICR);
 
-	/* read ICR disables interrupts using IAM, so keep up with our
-	 * enable/disable accounting */
-	atomic_inc(&adapter->irq_sem);
+	/*
+	 * read ICR disables interrupts using IAM
+	 */
 
 	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
 		hw->mac.get_link_status = 1;
-		/* ICH8 workaround-- Call gig speed drop workaround on cable
-		 * disconnect (LSC) before accessing any PHY registers */
+		/*
+		 * ICH8 workaround-- Call gig speed drop workaround on cable
+		 * disconnect (LSC) before accessing any PHY registers
+		 */
 		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
 		    (!(er32(STATUS) & E1000_STATUS_LU)))
 			e1000e_gig_downshift_workaround_ich8lan(hw);
 
-		/* 80003ES2LAN workaround-- For packet buffer work-around on
+		/*
+		 * 80003ES2LAN workaround-- For packet buffer work-around on
 		 * link down event; disable receives here in the ISR and reset
-		 * adapter in watchdog */
+		 * adapter in watchdog
+		 */
 		if (netif_carrier_ok(netdev) &&
 		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
 			/* disable receives */
 			u32 rctl = er32(RCTL);
 			ew32(RCTL, rctl & ~E1000_RCTL_EN);
+			adapter->flags |= FLAG_RX_RESTART_NOW;
 		}
 		/* guard against interrupt when we're going down */
 		if (!test_bit(__E1000_DOWN, &adapter->state))
@@ -868,8 +891,6 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
 		adapter->total_rx_bytes = 0;
 		adapter->total_rx_packets = 0;
 		__netif_rx_schedule(netdev, &adapter->napi);
-	} else {
-		atomic_dec(&adapter->irq_sem);
 	}
 
 	return IRQ_HANDLED;
@@ -890,26 +911,31 @@ static irqreturn_t e1000_intr(int irq, void *data)
 	if (!icr)
 		return IRQ_NONE;  /* Not our interrupt */
 
-	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
-	 * not set, then the adapter didn't send an interrupt */
+	/*
+	 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+	 * not set, then the adapter didn't send an interrupt
+	 */
 	if (!(icr & E1000_ICR_INT_ASSERTED))
 		return IRQ_NONE;
 
-	/* Interrupt Auto-Mask...upon reading ICR,
+	/*
+	 * Interrupt Auto-Mask...upon reading ICR,
 	 * interrupts are masked.  No need for the
-	 * IMC write, but it does mean we should
-	 * account for it ASAP. */
-	atomic_inc(&adapter->irq_sem);
+	 * IMC write
+	 */
 
 	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
 		hw->mac.get_link_status = 1;
-		/* ICH8 workaround-- Call gig speed drop workaround on cable
-		 * disconnect (LSC) before accessing any PHY registers */
+		/*
+		 * ICH8 workaround-- Call gig speed drop workaround on cable
+		 * disconnect (LSC) before accessing any PHY registers
+		 */
 		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
 		    (!(er32(STATUS) & E1000_STATUS_LU)))
 			e1000e_gig_downshift_workaround_ich8lan(hw);
 
-		/* 80003ES2LAN workaround--
+		/*
+		 * 80003ES2LAN workaround--
 		 * For packet buffer work-around on link down event;
 		 * disable receives here in the ISR and
 		 * reset adapter in watchdog
@@ -919,6 +945,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
 			/* disable receives */
 			rctl = er32(RCTL);
 			ew32(RCTL, rctl & ~E1000_RCTL_EN);
+			adapter->flags |= FLAG_RX_RESTART_NOW;
 		}
 		/* guard against interrupt when we're going down */
 		if (!test_bit(__E1000_DOWN, &adapter->state))
@@ -931,8 +958,6 @@ static irqreturn_t e1000_intr(int irq, void *data)
 		adapter->total_rx_bytes = 0;
 		adapter->total_rx_packets = 0;
 		__netif_rx_schedule(netdev, &adapter->napi);
-	} else {
-		atomic_dec(&adapter->irq_sem);
 	}
 
 	return IRQ_HANDLED;
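
With the irq_sem accounting removed, the two interrupt handlers above no longer track an
enable/disable depth; on a link-down event for parts flagged FLAG_RX_NEEDS_RESTART they
simply stop receives and set the new FLAG_RX_RESTART_NOW flag, leaving the restart to the
watchdog (see e1000e_enable_receives() later in this file).  As a rough sketch of that
hand-off, not literal driver code:

	/* ISR: link went down on a part that needs the Rx work-around */
	if (netif_carrier_ok(netdev) &&
	    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
		ew32(RCTL, er32(RCTL) & ~E1000_RCTL_EN);	/* stop receives */
		adapter->flags |= FLAG_RX_RESTART_NOW;
	}

	/* watchdog: link is back up, restart the receive unit once */
	if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
	    (adapter->flags & FLAG_RX_RESTART_NOW)) {
		ew32(RCTL, er32(RCTL) | E1000_RCTL_EN);
		adapter->flags &= ~FLAG_RX_RESTART_NOW;
	}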
@@ -983,7 +1008,6 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 
-	atomic_inc(&adapter->irq_sem);
 	ew32(IMC, ~0);
 	e1e_flush();
 	synchronize_irq(adapter->pdev->irq);
@@ -996,10 +1020,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 
-	if (atomic_dec_and_test(&adapter->irq_sem)) {
-		ew32(IMS, IMS_ENABLE_MASK);
-		e1e_flush();
-	}
+	ew32(IMS, IMS_ENABLE_MASK);
+	e1e_flush();
 }
 
 /**
@@ -1023,8 +1045,7 @@ static void e1000_get_hw_control(struct e1000_adapter *adapter)
 		ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
 	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
 		ctrl_ext = er32(CTRL_EXT);
-		ew32(CTRL_EXT,
-				ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+		ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
 	}
 }
 
@@ -1050,8 +1071,7 @@ static void e1000_release_hw_control(struct e1000_adapter *adapter)
 		ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
 	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
 		ctrl_ext = er32(CTRL_EXT);
-		ew32(CTRL_EXT,
-				ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+		ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
 	}
 }
 
@@ -1353,9 +1373,11 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
 
 set_itr_now:
 	if (new_itr != adapter->itr) {
-		/* this attempts to bias the interrupt rate towards Bulk
+		/*
+		 * this attempts to bias the interrupt rate towards Bulk
 		 * by adding intermediate steps when interrupt rate is
-		 * increasing */
+		 * increasing
+		 */
 		new_itr = new_itr > adapter->itr ?
 			     min(adapter->itr + (new_itr >> 2), new_itr) :
 			     new_itr;
@@ -1366,7 +1388,7 @@ set_itr_now:
 
 /**
  * e1000_clean - NAPI Rx polling callback
- * @adapter: board private structure
+ * @napi: struct associated with this polling callback
  * @budget: amount of packets driver is allowed to process this poll
  **/
 static int e1000_clean(struct napi_struct *napi, int budget)
@@ -1378,10 +1400,12 @@ static int e1000_clean(struct napi_struct *napi, int budget)
 	/* Must NOT use netdev_priv macro here. */
 	adapter = poll_dev->priv;
 
-	/* e1000_clean is called per-cpu.  This lock protects
+	/*
+	 * e1000_clean is called per-cpu.  This lock protects
 	 * tx_ring from being cleaned by multiple cpus
 	 * simultaneously.  A failure obtaining the lock means
-	 * tx_ring is currently being cleaned anyway. */
+	 * tx_ring is currently being cleaned anyway.
+	 */
 	if (spin_trylock(&adapter->tx_queue_lock)) {
 		tx_cleaned = e1000_clean_tx_irq(adapter);
 		spin_unlock(&adapter->tx_queue_lock);
@@ -1427,9 +1451,12 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 vfta, index;
 
-	e1000_irq_disable(adapter);
+	if (!test_bit(__E1000_DOWN, &adapter->state))
+		e1000_irq_disable(adapter);
 	vlan_group_set_device(adapter->vlgrp, vid, NULL);
-	e1000_irq_enable(adapter);
+
+	if (!test_bit(__E1000_DOWN, &adapter->state))
+		e1000_irq_enable(adapter);
 
 	if ((adapter->hw.mng_cookie.status &
 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
@@ -1480,7 +1507,8 @@ static void e1000_vlan_rx_register(struct net_device *netdev,
 	struct e1000_hw *hw = &adapter->hw;
 	u32 ctrl, rctl;
 
-	e1000_irq_disable(adapter);
+	if (!test_bit(__E1000_DOWN, &adapter->state))
+		e1000_irq_disable(adapter);
 	adapter->vlgrp = grp;
 
 	if (grp) {
@@ -1517,7 +1545,8 @@ static void e1000_vlan_rx_register(struct net_device *netdev,
 		}
 	}
 
-	e1000_irq_enable(adapter);
+	if (!test_bit(__E1000_DOWN, &adapter->state))
+		e1000_irq_enable(adapter);
 }
 
 static void e1000_restore_vlan(struct e1000_adapter *adapter)
@@ -1546,9 +1575,11 @@ static void e1000_init_manageability(struct e1000_adapter *adapter)
 
 	manc = er32(MANC);
 
-	/* enable receiving management packets to the host. this will probably
+	/*
+	 * enable receiving management packets to the host. this will probably
 	 * generate destination unreachable messages from the host OS, but
-	 * the packets will be handled on SMBUS */
+	 * the packets will be handled on SMBUS
+	 */
 	manc |= E1000_MANC_EN_MNG2HOST;
 	manc2h = er32(MANC2H);
 #define E1000_MNG2HOST_PORT_623 (1 << 5)
@@ -1598,7 +1629,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
 
 	/* Set the Tx Interrupt Delay register */
 	ew32(TIDV, adapter->tx_int_delay);
-	/* tx irq moderation */
+	/* Tx irq moderation */
 	ew32(TADV, adapter->tx_abs_int_delay);
 
 	/* Program the Transmit Control Register */
@@ -1608,22 +1639,24 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
 		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
 
 	if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
-		tarc = er32(TARC0);
-		/* set the speed mode bit, we'll clear it if we're not at
-		 * gigabit link later */
+		tarc = er32(TARC(0));
+		/*
+		 * set the speed mode bit, we'll clear it if we're not at
+		 * gigabit link later
+		 */
 #define SPEED_MODE_BIT (1 << 21)
 		tarc |= SPEED_MODE_BIT;
-		ew32(TARC0, tarc);
+		ew32(TARC(0), tarc);
 	}
 
 	/* errata: program both queues to unweighted RR */
 	if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
-		tarc = er32(TARC0);
+		tarc = er32(TARC(0));
 		tarc |= 1;
-		ew32(TARC0, tarc);
-		tarc = er32(TARC1);
+		ew32(TARC(0), tarc);
+		tarc = er32(TARC(1));
 		tarc |= 1;
-		ew32(TARC1, tarc);
+		ew32(TARC(1), tarc);
 	}
 
 	e1000e_config_collision_dist(hw);
@@ -1731,8 +1764,10 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
 		/* Configure extra packet-split registers */
 		rfctl = er32(RFCTL);
 		rfctl |= E1000_RFCTL_EXTEN;
-		/* disable packet split support for IPv6 extension headers,
-		 * because some malformed IPv6 headers can hang the RX */
+		/*
+		 * disable packet split support for IPv6 extension headers,
+		 * because some malformed IPv6 headers can hang the Rx
+		 */
 		rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
 			  E1000_RFCTL_NEW_IPV6_EXT_DIS);
 
@@ -1761,6 +1796,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
 	}
 
 	ew32(RCTL, rctl);
+	/* just started the receive unit, no need to restart */
+	adapter->flags &= ~FLAG_RX_RESTART_NOW;
 }
 
 /**
@@ -1801,8 +1838,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 	/* irq moderation */
 	ew32(RADV, adapter->rx_abs_int_delay);
 	if (adapter->itr_setting != 0)
-		ew32(ITR,
-			1000000000 / (adapter->itr * 256));
+		ew32(ITR, 1000000000 / (adapter->itr * 256));
 
 	ctrl_ext = er32(CTRL_EXT);
 	/* Reset delay timers after every interrupt */
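
The simplified ITR write above is the usual interrupts-per-second to register-units
conversion: the ITR register counts in 256 ns increments (hence the divide by 256), so
the driver divides one second (10^9 ns) by itr * 256.  For example, with an assumed
target of 8000 interrupts per second:

	u32 itr = 8000;				/* target interrupts/sec (example value only) */
	u32 reg = 1000000000 / (itr * 256);	/* = 488, i.e. roughly 125 us between interrupts */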
@@ -1813,8 +1849,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 	ew32(CTRL_EXT, ctrl_ext);
 	e1e_flush();
 
-	/* Setup the HW Rx Head and Tail Descriptor Pointers and
-	 * the Base and Length of the Rx Descriptor Ring */
+	/*
+	 * Setup the HW Rx Head and Tail Descriptor Pointers and
+	 * the Base and Length of the Rx Descriptor Ring
+	 */
 	rdba = rx_ring->dma;
 	ew32(RDBAL, (rdba & DMA_32BIT_MASK));
 	ew32(RDBAH, (rdba >> 32));
@@ -1829,8 +1867,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 	if (adapter->flags & FLAG_RX_CSUM_ENABLED) {
 		rxcsum |= E1000_RXCSUM_TUOFL;
 
-		/* IPv4 payload checksum for UDP fragments must be
-		 * used in conjunction with packet-split. */
+		/*
+		 * IPv4 payload checksum for UDP fragments must be
+		 * used in conjunction with packet-split.
+		 */
 		if (adapter->rx_ps_pages)
 			rxcsum |= E1000_RXCSUM_IPPCSE;
 	} else {
@@ -1839,9 +1879,11 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 	}
 	ew32(RXCSUM, rxcsum);
 
-	/* Enable early receives on supported devices, only takes effect when
+	/*
+	 * Enable early receives on supported devices; this only takes effect when
 	 * packet size is equal to or larger than the specified value (in 8 byte
-	 * units), e.g. using jumbo frames when setting to E1000_ERT_2048 */
+	 * units), e.g. using jumbo frames when setting to E1000_ERT_2048
+	 */
 	if ((adapter->flags & FLAG_HAS_ERT) &&
 	    (adapter->netdev->mtu > ETH_DATA_LEN))
 		ew32(ERT, E1000_ERT_2048);
@@ -1851,7 +1893,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 }
 
 /**
- *  e1000_mc_addr_list_update - Update Multicast addresses
+ *  e1000_update_mc_addr_list - Update Multicast addresses
  *  @hw: pointer to the HW structure
  *  @mc_addr_list: array of multicast addresses to program
  *  @mc_addr_count: number of multicast addresses to program
@@ -1865,11 +1907,11 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
  *  exists and all implementations are handled in the generic version of this
  *  function.
  **/
-static void e1000_mc_addr_list_update(struct e1000_hw *hw, u8 *mc_addr_list,
-			       u32 mc_addr_count, u32 rar_used_count,
-			       u32 rar_count)
+static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
+				      u32 mc_addr_count, u32 rar_used_count,
+				      u32 rar_count)
 {
-	hw->mac.ops.mc_addr_list_update(hw, mc_addr_list, mc_addr_count,
+	hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
 				        rar_used_count, rar_count);
 }
 
@@ -1923,7 +1965,7 @@ static void e1000_set_multi(struct net_device *netdev)
 			mc_ptr = mc_ptr->next;
 		}
 
-		e1000_mc_addr_list_update(hw, mta_list, i, 1,
+		e1000_update_mc_addr_list(hw, mta_list, i, 1,
 					  mac->rar_entry_count);
 		kfree(mta_list);
 	} else {
@@ -1931,13 +1973,12 @@ static void e1000_set_multi(struct net_device *netdev)
 		 * if we're called from probe, we might not have
 		 * anything to do here, so clear out the list
 		 */
-		e1000_mc_addr_list_update(hw, NULL, 0, 1,
-					  mac->rar_entry_count);
+		e1000_update_mc_addr_list(hw, NULL, 0, 1, mac->rar_entry_count);
 	}
 }
 
 /**
- * e1000_configure - configure the hardware for RX and TX
+ * e1000_configure - configure the hardware for Rx and Tx
  * @adapter: private board structure
  **/
 static void e1000_configure(struct e1000_adapter *adapter)
@@ -1950,8 +1991,7 @@ static void e1000_configure(struct e1000_adapter *adapter)
 	e1000_configure_tx(adapter);
 	e1000_setup_rctl(adapter);
 	e1000_configure_rx(adapter);
-	adapter->alloc_rx_buf(adapter,
-			      e1000_desc_unused(adapter->rx_ring));
+	adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring));
 }
 
 /**
@@ -1967,9 +2007,11 @@ void e1000e_power_up_phy(struct e1000_adapter *adapter)
 	u16 mii_reg = 0;
 
 	/* Just clear the power down bit to wake the phy back up */
-	if (adapter->hw.media_type == e1000_media_type_copper) {
-		/* according to the manual, the phy will retain its
-		 * settings across a power-down/up cycle */
+	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
+		/*
+		 * According to the manual, the phy will retain its
+		 * settings across a power-down/up cycle
+		 */
 		e1e_rphy(&adapter->hw, PHY_CONTROL, &mii_reg);
 		mii_reg &= ~MII_CR_POWER_DOWN;
 		e1e_wphy(&adapter->hw, PHY_CONTROL, mii_reg);
@@ -1994,12 +2036,11 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
 		return;
 
 	/* non-copper PHY? */
-	if (adapter->hw.media_type != e1000_media_type_copper)
+	if (adapter->hw.phy.media_type != e1000_media_type_copper)
 		return;
 
 	/* reset is blocked because of a SoL/IDER session */
-	if (e1000e_check_mng_mode(hw) ||
-	    e1000_check_reset_block(hw))
+	if (e1000e_check_mng_mode(hw) || e1000_check_reset_block(hw))
 		return;
 
 	/* manageability (AMT) is enabled */
@@ -2019,51 +2060,61 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
  * This function boots the hardware and enables some settings that
  * require a configuration cycle of the hardware - those cannot be
  * set/changed during runtime. After reset the device needs to be
- * properly configured for rx, tx etc.
+ * properly configured for Rx, Tx etc.
  */
 void e1000e_reset(struct e1000_adapter *adapter)
 {
 	struct e1000_mac_info *mac = &adapter->hw.mac;
+	struct e1000_fc_info *fc = &adapter->hw.fc;
 	struct e1000_hw *hw = &adapter->hw;
 	u32 tx_space, min_tx_space, min_rx_space;
-	u32 pba;
+	u32 pba = adapter->pba;
 	u16 hwm;
 
-	ew32(PBA, adapter->pba);
+	/* reset Packet Buffer Allocation to default */
+	ew32(PBA, pba);
 
-	if (mac->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN ) {
-		/* To maintain wire speed transmits, the Tx FIFO should be
+	if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
+		/*
+		 * To maintain wire speed transmits, the Tx FIFO should be
 		 * large enough to accommodate two full transmit packets,
 		 * rounded up to the next 1KB and expressed in KB.  Likewise,
 		 * the Rx FIFO should be large enough to accommodate at least
 		 * one full receive packet and is similarly rounded up and
-		 * expressed in KB. */
+		 * expressed in KB.
+		 */
 		pba = er32(PBA);
 		/* upper 16 bits has Tx packet buffer allocation size in KB */
 		tx_space = pba >> 16;
 		/* lower 16 bits has Rx packet buffer allocation size in KB */
 		pba &= 0xffff;
-		/* the tx fifo also stores 16 bytes of information about the tx
-		 * but don't include ethernet FCS because hardware appends it */
-		min_tx_space = (mac->max_frame_size +
+		/*
+		 * the Tx fifo also stores 16 bytes of information about the tx
+		 * but don't include ethernet FCS because hardware appends it
+		 */
+		min_tx_space = (adapter->max_frame_size +
 				sizeof(struct e1000_tx_desc) -
 				ETH_FCS_LEN) * 2;
 		min_tx_space = ALIGN(min_tx_space, 1024);
 		min_tx_space >>= 10;
 		/* software strips receive CRC, so leave room for it */
-		min_rx_space = mac->max_frame_size;
+		min_rx_space = adapter->max_frame_size;
 		min_rx_space = ALIGN(min_rx_space, 1024);
 		min_rx_space >>= 10;
 
-		/* If current Tx allocation is less than the min Tx FIFO size,
+		/*
+		 * If current Tx allocation is less than the min Tx FIFO size,
 		 * and the min Tx FIFO size is less than the current Rx FIFO
-		 * allocation, take space away from current Rx allocation */
+		 * allocation, take space away from current Rx allocation
+		 */
 		if ((tx_space < min_tx_space) &&
 		    ((min_tx_space - tx_space) < pba)) {
 			pba -= min_tx_space - tx_space;
 
-			/* if short on rx space, rx wins and must trump tx
-			 * adjustment or use Early Receive if available */
+			/*
+			 * if short on Rx space, Rx wins and must trump tx
+			 * adjustment or use Early Receive if available
+			 */
 			if ((pba < min_rx_space) &&
 			    (!(adapter->flags & FLAG_HAS_ERT)))
 				/* ERT enabled in e1000_configure_rx */
@@ -2074,29 +2125,33 @@ void e1000e_reset(struct e1000_adapter *adapter)
 	}
 
 
-	/* flow control settings */
-	/* The high water mark must be low enough to fit one full frame
+	/*
+	 * flow control settings
+	 *
+	 * The high water mark must be low enough to fit one full frame
 	 * (or the size used for early receive) above it in the Rx FIFO.
 	 * Set it to the lower of:
 	 * - 90% of the Rx FIFO size, and
 	 * - the full Rx FIFO size minus the early receive size (for parts
 	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
-	 * - the full Rx FIFO size minus one full frame */
+	 * - the full Rx FIFO size minus one full frame
+	 */
 	if (adapter->flags & FLAG_HAS_ERT)
-		hwm = min(((adapter->pba << 10) * 9 / 10),
-			  ((adapter->pba << 10) - (E1000_ERT_2048 << 3)));
+		hwm = min(((pba << 10) * 9 / 10),
+			  ((pba << 10) - (E1000_ERT_2048 << 3)));
 	else
-		hwm = min(((adapter->pba << 10) * 9 / 10),
-			  ((adapter->pba << 10) - mac->max_frame_size));
+		hwm = min(((pba << 10) * 9 / 10),
+			  ((pba << 10) - adapter->max_frame_size));
 
-	mac->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */
-	mac->fc_low_water = mac->fc_high_water - 8;
+	fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
+	fc->low_water = fc->high_water - 8;
 
 	if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
-		mac->fc_pause_time = 0xFFFF;
+		fc->pause_time = 0xFFFF;
 	else
-		mac->fc_pause_time = E1000_FC_PAUSE_TIME;
-	mac->fc = mac->original_fc;
+		fc->pause_time = E1000_FC_PAUSE_TIME;
+	fc->send_xon = 1;
+	fc->type = fc->original_type;
 
 	/* Allow time for pending master requests to run */
 	mac->ops.reset_hw(hw);
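
For the non-ERT branch of the water-mark calculation above, a worked example with
illustrative numbers (not taken from any particular part): assume a 20 KB Rx packet
buffer (pba = 20) and a standard 1522-byte maximum frame.

	pba << 10                 = 20480 bytes of Rx FIFO
	90% of the FIFO           = 20480 * 9 / 10   = 18432
	FIFO minus one full frame = 20480 - 1522     = 18958
	hwm                       = min(18432, 18958) = 18432
	fc->high_water            = 18432 & 0xFFF8   = 18432  (already 8-byte aligned)
	fc->low_water             = 18432 - 8        = 18424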
@@ -2115,9 +2170,11 @@ void e1000e_reset(struct e1000_adapter *adapter)
 
 	if (!(adapter->flags & FLAG_SMART_POWER_DOWN)) {
 		u16 phy_data = 0;
-		/* speed up time to link by disabling smart power down, ignore
+		/*
+		 * speed up time to link by disabling smart power down, ignore
 		 * the return value of this function because there is nothing
-		 * different we would do if it failed */
+		 * different we would do if it failed
+		 */
 		e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
 		phy_data &= ~IGP02E1000_PM_SPD;
 		e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
@@ -2147,8 +2204,10 @@ void e1000e_down(struct e1000_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 tctl, rctl;
 
-	/* signal that we're down so the interrupt handler does not
-	 * reschedule our watchdog timer */
+	/*
+	 * signal that we're down so the interrupt handler does not
+	 * reschedule our watchdog timer
+	 */
 	set_bit(__E1000_DOWN, &adapter->state);
 
 	/* disable receives in the hardware */
@@ -2167,7 +2226,6 @@ void e1000e_down(struct e1000_adapter *adapter)
 	msleep(10);
 
 	napi_disable(&adapter->napi);
-	atomic_set(&adapter->irq_sem, 0);
 	e1000_irq_disable(adapter);
 
 	del_timer_sync(&adapter->watchdog_timer);
@@ -2208,13 +2266,12 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
  **/
 static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
 {
-	struct e1000_hw *hw = &adapter->hw;
 	struct net_device *netdev = adapter->netdev;
 
 	adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
 	adapter->rx_ps_bsize0 = 128;
-	hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
-	hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
 	adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
 	if (!adapter->tx_ring)
@@ -2227,7 +2284,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
 	spin_lock_init(&adapter->tx_queue_lock);
 
 	/* Explicitly disable IRQ since the NIC can be in any state. */
-	atomic_set(&adapter->irq_sem, 0);
 	e1000_irq_disable(adapter);
 
 	spin_lock_init(&adapter->stats_lock);
@@ -2281,16 +2337,20 @@ static int e1000_open(struct net_device *netdev)
 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
 		e1000_update_mng_vlan(adapter);
 
-	/* If AMT is enabled, let the firmware know that the network
-	 * interface is now open */
+	/*
+	 * If AMT is enabled, let the firmware know that the network
+	 * interface is now open
+	 */
 	if ((adapter->flags & FLAG_HAS_AMT) &&
 	    e1000e_check_mng_mode(&adapter->hw))
 		e1000_get_hw_control(adapter);
 
-	/* before we allocate an interrupt, we must be ready to handle it.
+	/*
+	 * before we allocate an interrupt, we must be ready to handle it.
 	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
 	 * as soon as we call pci_request_irq, so we have to setup our
-	 * clean_rx handler before we do so.  */
+	 * clean_rx handler before we do so.
+	 */
 	e1000_configure(adapter);
 
 	err = e1000_request_irq(adapter);
@@ -2344,16 +2404,20 @@ static int e1000_close(struct net_device *netdev)
 	e1000e_free_tx_resources(adapter);
 	e1000e_free_rx_resources(adapter);
 
-	/* kill manageability vlan ID if supported, but not if a vlan with
-	 * the same ID is registered on the host OS (let 8021q kill it) */
+	/*
+	 * kill manageability vlan ID if supported, but not if a vlan with
+	 * the same ID is registered on the host OS (let 8021q kill it)
+	 */
 	if ((adapter->hw.mng_cookie.status &
 			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
 	     !(adapter->vlgrp &&
 	       vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
 		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
 
-	/* If AMT is enabled, let the firmware know that the network
-	 * interface is now closed */
+	/*
+	 * If AMT is enabled, let the firmware know that the network
+	 * interface is now closed
+	 */
 	if ((adapter->flags & FLAG_HAS_AMT) &&
 	    e1000e_check_mng_mode(&adapter->hw))
 		e1000_release_hw_control(adapter);
@@ -2384,12 +2448,14 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
 		/* activate the work around */
 		e1000e_set_laa_state_82571(&adapter->hw, 1);
 
-		/* Hold a copy of the LAA in RAR[14] This is done so that
+		/*
+		 * Hold a copy of the LAA in RAR[14] This is done so that
 		 * between the time RAR[0] gets clobbered  and the time it
 		 * gets fixed (in e1000_watchdog), the actual LAA is in one
 		 * of the RARs and no incoming packets directed to this port
 		 * are dropped. Eventually the LAA will be in RAR[0] and
-		 * RAR[14] */
+		 * RAR[14]
+		 */
 		e1000e_rar_set(&adapter->hw,
 			      adapter->hw.mac.addr,
 			      adapter->hw.mac.rar_entry_count - 1);
@@ -2398,8 +2464,10 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
 	return 0;
 }
 
-/* Need to wait a few seconds after link up to get diagnostic information from
- * the phy */
+/*
+ * Need to wait a few seconds after link up to get diagnostic information from
+ * the phy
+ */
 static void e1000_update_phy_info(unsigned long data)
 {
 	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
@@ -2430,7 +2498,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
 
 	spin_lock_irqsave(&adapter->stats_lock, irq_flags);
 
-	/* these counters are modified from e1000_adjust_tbi_stats,
+	/*
+	 * these counters are modified from e1000_adjust_tbi_stats,
 	 * called from the interrupt context, so they must only
 	 * be written while holding adapter->stats_lock
 	 */
@@ -2524,8 +2593,10 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
 
 	/* Rx Errors */
 
-	/* RLEC on some newer hardware can be incorrect so build
-	* our own version based on RUC and ROC */
+	/*
+	 * RLEC on some newer hardware can be incorrect so build
+	 * our own version based on RUC and ROC
+	 */
 	adapter->net_stats.rx_errors = adapter->stats.rxerrc +
 		adapter->stats.crcerrs + adapter->stats.algnerrc +
 		adapter->stats.ruc + adapter->stats.roc +
@@ -2546,7 +2617,7 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
 	/* Tx Dropped needs to be maintained elsewhere */
 
 	/* Phy Stats */
-	if (hw->media_type == e1000_media_type_copper) {
+	if (hw->phy.media_type == e1000_media_type_copper) {
 		if ((adapter->link_speed == SPEED_1000) &&
 		   (!e1e_rphy(hw, PHY_1000T_STATUS, &phy_tmp))) {
 			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
@@ -2564,8 +2635,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
 
 static void e1000_print_link_info(struct e1000_adapter *adapter)
 {
-	struct net_device *netdev = adapter->netdev;
 	struct e1000_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
 	u32 ctrl = er32(CTRL);
 
 	ndev_info(netdev,
@@ -2579,6 +2650,62 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
 		((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
 }
 
+static bool e1000_has_link(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	bool link_active = 0;
+	s32 ret_val = 0;
+
+	/*
+	 * get_link_status is set on LSC (link status) interrupt or
+	 * Rx sequence error interrupt.  get_link_status will stay
+	 * false until the check_for_link establishes link
+	 * for copper adapters ONLY
+	 */
+	switch (hw->phy.media_type) {
+	case e1000_media_type_copper:
+		if (hw->mac.get_link_status) {
+			ret_val = hw->mac.ops.check_for_link(hw);
+			link_active = !hw->mac.get_link_status;
+		} else {
+			link_active = 1;
+		}
+		break;
+	case e1000_media_type_fiber:
+		ret_val = hw->mac.ops.check_for_link(hw);
+		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
+		break;
+	case e1000_media_type_internal_serdes:
+		ret_val = hw->mac.ops.check_for_link(hw);
+		link_active = adapter->hw.mac.serdes_has_link;
+		break;
+	default:
+	case e1000_media_type_unknown:
+		break;
+	}
+
+	if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
+	    (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
+		/* See e1000_kmrn_lock_loss_workaround_ich8lan() */
+		ndev_info(adapter->netdev,
+			  "Gigabit has been disabled, downgrading speed\n");
+	}
+
+	return link_active;
+}
+
+static void e1000e_enable_receives(struct e1000_adapter *adapter)
+{
+	/* make sure the receive unit is started */
+	if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
+	    (adapter->flags & FLAG_RX_RESTART_NOW)) {
+		struct e1000_hw *hw = &adapter->hw;
+		u32 rctl = er32(RCTL);
+		ew32(RCTL, rctl | E1000_RCTL_EN);
+		adapter->flags &= ~FLAG_RX_RESTART_NOW;
+	}
+}
+
 /**
  * e1000_watchdog - Timer Call-back
  * @data: pointer to adapter cast into an unsigned long
@@ -2597,48 +2724,35 @@ static void e1000_watchdog_task(struct work_struct *work)
 {
 	struct e1000_adapter *adapter = container_of(work,
 					struct e1000_adapter, watchdog_task);
-
 	struct net_device *netdev = adapter->netdev;
 	struct e1000_mac_info *mac = &adapter->hw.mac;
 	struct e1000_ring *tx_ring = adapter->tx_ring;
 	struct e1000_hw *hw = &adapter->hw;
 	u32 link, tctl;
-	s32 ret_val;
 	int tx_pending = 0;
 
-	if ((netif_carrier_ok(netdev)) &&
-	    (er32(STATUS) & E1000_STATUS_LU))
+	link = e1000_has_link(adapter);
+	if ((netif_carrier_ok(netdev)) && link) {
+		e1000e_enable_receives(adapter);
 		goto link_up;
-
-	ret_val = mac->ops.check_for_link(hw);
-	if ((ret_val == E1000_ERR_PHY) &&
-	    (adapter->hw.phy.type == e1000_phy_igp_3) &&
-	    (er32(CTRL) &
-	     E1000_PHY_CTRL_GBE_DISABLE)) {
-		/* See e1000_kmrn_lock_loss_workaround_ich8lan() */
-		ndev_info(netdev,
-			"Gigabit has been disabled, downgrading speed\n");
 	}
 
 	if ((e1000e_enable_tx_pkt_filtering(hw)) &&
 	    (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
 		e1000_update_mng_vlan(adapter);
 
-	if ((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
-	   !(er32(TXCW) & E1000_TXCW_ANE))
-		link = adapter->hw.mac.serdes_has_link;
-	else
-		link = er32(STATUS) & E1000_STATUS_LU;
-
 	if (link) {
 		if (!netif_carrier_ok(netdev)) {
 			bool txb2b = 1;
+			/* update snapshot of PHY registers on LSC */
 			mac->ops.get_link_up_info(&adapter->hw,
 						   &adapter->link_speed,
 						   &adapter->link_duplex);
 			e1000_print_link_info(adapter);
-			/* tweak tx_queue_len according to speed/duplex
-			 * and adjust the timeout factor */
+			/*
+			 * tweak tx_queue_len according to speed/duplex
+			 * and adjust the timeout factor
+			 */
 			netdev->tx_queue_len = adapter->tx_queue_len;
 			adapter->tx_timeout_factor = 1;
 			switch (adapter->link_speed) {
@@ -2654,18 +2768,22 @@ static void e1000_watchdog_task(struct work_struct *work)
 				break;
 			}
 
-			/* workaround: re-program speed mode bit after
-			 * link-up event */
+			/*
+			 * workaround: re-program speed mode bit after
+			 * link-up event
+			 */
 			if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
 			    !txb2b) {
 				u32 tarc0;
-				tarc0 = er32(TARC0);
+				tarc0 = er32(TARC(0));
 				tarc0 &= ~SPEED_MODE_BIT;
-				ew32(TARC0, tarc0);
+				ew32(TARC(0), tarc0);
 			}
 
-			/* disable TSO for pcie and 10/100 speeds, to avoid
-			 * some hardware issues */
+			/*
+			 * disable TSO for pcie and 10/100 speeds, to avoid
+			 * some hardware issues
+			 */
 			if (!(adapter->flags & FLAG_TSO_FORCE)) {
 				switch (adapter->link_speed) {
 				case SPEED_10:
@@ -2685,8 +2803,10 @@ static void e1000_watchdog_task(struct work_struct *work)
 				}
 			}
 
-			/* enable transmits in the hardware, need to do this
-			 * after setting TARC0 */
+			/*
+			 * enable transmits in the hardware, need to do this
+			 * after setting TARC(0)
+			 */
 			tctl = er32(TCTL);
 			tctl |= E1000_TCTL_EN;
 			ew32(TCTL, tctl);
@@ -2697,13 +2817,6 @@ static void e1000_watchdog_task(struct work_struct *work)
 			if (!test_bit(__E1000_DOWN, &adapter->state))
 				mod_timer(&adapter->phy_info_timer,
 					  round_jiffies(jiffies + 2 * HZ));
-		} else {
-			/* make sure the receive unit is started */
-			if (adapter->flags & FLAG_RX_NEEDS_RESTART) {
-				u32 rctl = er32(RCTL);
-				ew32(RCTL, rctl |
-						E1000_RCTL_EN);
-			}
 		}
 	} else {
 		if (netif_carrier_ok(netdev)) {
@@ -2740,23 +2853,27 @@ link_up:
 		tx_pending = (e1000_desc_unused(tx_ring) + 1 <
 			       tx_ring->count);
 		if (tx_pending) {
-			/* We've lost link, so the controller stops DMA,
+			/*
+			 * We've lost link, so the controller stops DMA,
 			 * but we've got queued Tx work that's never going
 			 * to get done, so reset controller to flush Tx.
-			 * (Do the reset outside of interrupt context). */
+			 * (Do the reset outside of interrupt context).
+			 */
 			adapter->tx_timeout_count++;
 			schedule_work(&adapter->reset_task);
 		}
 	}
 
-	/* Cause software interrupt to ensure rx ring is cleaned */
+	/* Cause software interrupt to ensure Rx ring is cleaned */
 	ew32(ICS, E1000_ICS_RXDMT0);
 
 	/* Force detection of hung controller every watchdog period */
 	adapter->detect_tx_hung = 1;
 
-	/* With 82571 controllers, LAA may be overwritten due to controller
-	 * reset from the other port. Set the appropriate LAA in RAR[0] */
+	/*
+	 * With 82571 controllers, LAA may be overwritten due to controller
+	 * reset from the other port. Set the appropriate LAA in RAR[0]
+	 */
 	if (e1000e_get_laa_state_82571(hw))
 		e1000e_rar_set(hw, adapter->hw.mac.addr, 0);
 
@@ -3032,16 +3149,20 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
 
 	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
 
-	/* Force memory writes to complete before letting h/w
+	/*
+	 * Force memory writes to complete before letting h/w
 	 * know there are new descriptors to fetch.  (Only
 	 * applicable for weak-ordered memory model archs,
-	 * such as IA-64). */
+	 * such as IA-64).
+	 */
 	wmb();
 
 	tx_ring->next_to_use = i;
 	writel(i, adapter->hw.hw_addr + tx_ring->tail);
-	/* we need this if more than one processor can write to our tail
-	 * at a time, it synchronizes IO on IA64/Altix systems */
+	/*
+	 * we need this if more than one processor can write to our tail
+	 * at a time, it synchronizes IO on IA64/Altix systems
+	 */
 	mmiowb();
 }
 
@@ -3089,13 +3210,17 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 
 	netif_stop_queue(netdev);
-	/* Herbert's original patch had:
+	/*
+	 * Herbert's original patch had:
 	 *  smp_mb__after_netif_stop_queue();
-	 * but since that doesn't exist yet, just open code it. */
+	 * but since that doesn't exist yet, just open code it.
+	 */
 	smp_mb();
 
-	/* We need to check again in a case another CPU has just
-	 * made room available. */
+	/*
+	 * We need to check again in a case another CPU has just
+	 * made room available.
+	 */
 	if (e1000_desc_unused(adapter->tx_ring) < size)
 		return -EBUSY;
 
@@ -3142,21 +3267,29 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	}
 
 	mss = skb_shinfo(skb)->gso_size;
-	/* The controller does a simple calculation to
+	/*
+	 * The controller does a simple calculation to
 	 * make sure there is enough room in the FIFO before
 	 * initiating the DMA for each buffer.  The calc is:
 	 * 4 = ceil(buffer len/mss).  To make sure we don't
 	 * overrun the FIFO, adjust the max buffer len if mss
-	 * drops. */
+	 * drops.
+	 */
 	if (mss) {
 		u8 hdr_len;
 		max_per_txd = min(mss << 2, max_per_txd);
 		max_txd_pwr = fls(max_per_txd) - 1;
 
-		/* TSO Workaround for 82571/2/3 Controllers -- if skb->data
-		* points to just header, pull a few bytes of payload from
-		* frags into skb->data */
+		/*
+		 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
+		 * points to just header, pull a few bytes of payload from
+		 * frags into skb->data
+		 */
 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		/*
+		 * we do this workaround for ES2LAN, but it is unnecessary;
+		 * avoiding it could save a lot of cycles
+		 */
 		if (skb->data_len && (hdr_len == len)) {
 			unsigned int pull_size;
 
@@ -3190,8 +3323,10 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		/* Collision - tell upper layer to requeue */
 		return NETDEV_TX_LOCKED;
 
-	/* need: count + 2 desc gap to keep tail from touching
-	 * head, otherwise try next time */
+	/*
+	 * need: count + 2 desc gap to keep tail from touching
+	 * head, otherwise try next time
+	 */
 	if (e1000_maybe_stop_tx(netdev, count + 2)) {
 		spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
 		return NETDEV_TX_BUSY;
@@ -3216,9 +3351,11 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	else if (e1000_tx_csum(adapter, skb))
 		tx_flags |= E1000_TX_FLAGS_CSUM;
 
-	/* Old method was to assume IPv4 packet by default if TSO was enabled.
+	/*
+	 * Old method was to assume IPv4 packet by default if TSO was enabled.
 	 * 82571 hardware supports TSO capabilities for IPv6 as well...
-	 * no longer assume, we must. */
+	 * no longer assume, we must.
+	 */
 	if (skb->protocol == htons(ETH_P_IP))
 		tx_flags |= E1000_TX_FLAGS_IPV4;
 
@@ -3316,14 +3453,16 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
 		msleep(1);
 	/* e1000e_down has a dependency on max_frame_size */
-	adapter->hw.mac.max_frame_size = max_frame;
+	adapter->max_frame_size = max_frame;
 	if (netif_running(netdev))
 		e1000e_down(adapter);
 
-	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+	/*
+	 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
 	 * means we reserve 2 more, this pushes us to allocate from the next
 	 * larger slab size.
-	 * i.e. RXBUFFER_2048 --> size-4096 slab */
+	 * i.e. RXBUFFER_2048 --> size-4096 slab
+	 */
 
 	if (max_frame <= 256)
 		adapter->rx_buffer_len = 256;
@@ -3340,7 +3479,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
 	     (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
 		adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
-					 + ETH_FCS_LEN ;
+					 + ETH_FCS_LEN;
 
 	ndev_info(netdev, "changing MTU from %d to %d\n",
 		netdev->mtu, new_mtu);
@@ -3363,7 +3502,7 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
 	struct mii_ioctl_data *data = if_mii(ifr);
 	unsigned long irq_flags;
 
-	if (adapter->hw.media_type != e1000_media_type_copper)
+	if (adapter->hw.phy.media_type != e1000_media_type_copper)
 		return -EOPNOTSUPP;
 
 	switch (cmd) {
@@ -3445,8 +3584,9 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
 			E1000_CTRL_EN_PHY_PWR_MGMT;
 		ew32(CTRL, ctrl);
 
-		if (adapter->hw.media_type == e1000_media_type_fiber ||
-		   adapter->hw.media_type == e1000_media_type_internal_serdes) {
+		if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
+		    adapter->hw.phy.media_type ==
+		    e1000_media_type_internal_serdes) {
 			/* keep the laser running in D3 */
 			ctrl_ext = er32(CTRL_EXT);
 			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
@@ -3476,8 +3616,10 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
 	if (adapter->hw.phy.type == e1000_phy_igp_3)
 		e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
 
-	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
-	 * would have already happened in close and is redundant. */
+	/*
+	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
+	 * would have already happened in close and is redundant.
+	 */
 	e1000_release_hw_control(adapter);
 
 	pci_disable_device(pdev);
@@ -3552,9 +3694,11 @@ static int e1000_resume(struct pci_dev *pdev)
 
 	netif_device_attach(netdev);
 
-	/* If the controller has AMT, do not set DRV_LOAD until the interface
+	/*
+	 * If the controller has AMT, do not set DRV_LOAD until the interface
 	 * is up.  For all other cases, let the f/w know that the h/w is now
-	 * under the control of the driver. */
+	 * under the control of the driver.
+	 */
 	if (!(adapter->flags & FLAG_HAS_AMT) || !e1000e_check_mng_mode(&adapter->hw))
 		e1000_get_hw_control(adapter);
 
@@ -3665,9 +3809,11 @@ static void e1000_io_resume(struct pci_dev *pdev)
 
 	netif_device_attach(netdev);
 
-	/* If the controller has AMT, do not set DRV_LOAD until the interface
+	/*
+	 * If the controller has AMT, do not set DRV_LOAD until the interface
 	 * is up.  For all other cases, let the f/w know that the h/w is now
-	 * under the control of the driver. */
+	 * under the control of the driver.
+	 */
 	if (!(adapter->flags & FLAG_HAS_AMT) ||
 	    !e1000e_check_mng_mode(&adapter->hw))
 		e1000_get_hw_control(adapter);
@@ -3678,7 +3824,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	struct net_device *netdev = adapter->netdev;
-	u32 part_num;
+	u32 pba_num;
 
 	/* print bus type/speed/width info */
 	ndev_info(netdev, "(PCI Express:2.5GB/s:%s) "
@@ -3693,10 +3839,10 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
 	ndev_info(netdev, "Intel(R) PRO/%s Network Connection\n",
 		  (hw->phy.type == e1000_phy_ife)
 		   ? "10/100" : "1000");
-	e1000e_read_part_num(hw, &part_num);
+	e1000e_read_pba_num(hw, &pba_num);
 	ndev_info(netdev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
 		  hw->mac.type, hw->phy.type,
-		  (part_num >> 8), (part_num & 0xff));
+		  (pba_num >> 8), (pba_num & 0xff));
 }
 
 /**
@@ -3828,16 +3974,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
 	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
 
-	err = ei->get_invariants(adapter);
+	err = ei->get_variants(adapter);
 	if (err)
 		goto err_hw_init;
 
 	hw->mac.ops.get_bus_info(&adapter->hw);
 
-	adapter->hw.phy.wait_for_link = 0;
+	adapter->hw.phy.autoneg_wait_to_complete = 0;
 
 	/* Copper options */
-	if (adapter->hw.media_type == e1000_media_type_copper) {
+	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
 		adapter->hw.phy.mdix = AUTO_ALL_MODES;
 		adapter->hw.phy.disable_polarity_correction = 0;
 		adapter->hw.phy.ms_type = e1000_ms_hw_default;
@@ -3861,15 +4007,19 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
-	/* We should not be using LLTX anymore, but we are still TX faster with
-	 * it. */
+	/*
+	 * We should not be using LLTX anymore, but we are still Tx faster with
+	 * it.
+	 */
 	netdev->features |= NETIF_F_LLTX;
 
 	if (e1000e_enable_mng_pass_thru(&adapter->hw))
 		adapter->flags |= FLAG_MNG_PT_ENABLED;
 
-	/* before reading the NVM, reset the controller to
-	 * put the device in a known good starting state */
+	/*
+	 * before reading the NVM, reset the controller to
+	 * put the device in a known good starting state
+	 */
 	adapter->hw.mac.ops.reset_hw(&adapter->hw);
 
 	/*
@@ -3919,8 +4069,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	/* Initialize link parameters. User can change them with ethtool */
 	adapter->hw.mac.autoneg = 1;
 	adapter->fc_autoneg = 1;
-	adapter->hw.mac.original_fc = e1000_fc_default;
-	adapter->hw.mac.fc = e1000_fc_default;
+	adapter->hw.fc.original_type = e1000_fc_default;
+	adapter->hw.fc.type = e1000_fc_default;
 	adapter->hw.phy.autoneg_advertised = 0x2f;
 
 	/* ring size defaults */
@@ -3963,9 +4113,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	/* reset the hardware with the new settings */
 	e1000e_reset(adapter);
 
-	/* If the controller has AMT, do not set DRV_LOAD until the interface
+	/*
+	 * If the controller has AMT, do not set DRV_LOAD until the interface
 	 * is up.  For all other cases, let the f/w know that the h/w is now
-	 * under the control of the driver. */
+	 * under the control of the driver.
+	 */
 	if (!(adapter->flags & FLAG_HAS_AMT) ||
 	    !e1000e_check_mng_mode(&adapter->hw))
 		e1000_get_hw_control(adapter);
@@ -4022,16 +4174,20 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 
-	/* flush_scheduled work may reschedule our watchdog task, so
-	 * explicitly disable watchdog tasks from being rescheduled  */
+	/*
+	 * flush_scheduled work may reschedule our watchdog task, so
+	 * explicitly disable watchdog tasks from being rescheduled
+	 */
 	set_bit(__E1000_DOWN, &adapter->state);
 	del_timer_sync(&adapter->watchdog_timer);
 	del_timer_sync(&adapter->phy_info_timer);
 
 	flush_scheduled_work();
 
-	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
-	 * would have already happened in close and is redundant. */
+	/*
+	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
+	 * would have already happened in close and is redundant.
+	 */
 	e1000_release_hw_control(adapter);
 
 	unregister_netdev(netdev);
@@ -4069,13 +4225,16 @@ static struct pci_device_id e1000_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },
+
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },
+
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
+
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
 	  board_80003es2lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
@@ -4084,6 +4243,7 @@ static struct pci_device_id e1000_pci_tbl[] = {
 	  board_80003es2lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
 	  board_80003es2lan },
+
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
@@ -4091,6 +4251,7 @@ static struct pci_device_id e1000_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
+
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
@@ -4108,7 +4269,7 @@ static struct pci_driver e1000_driver = {
 	.probe    = e1000_probe,
 	.remove   = __devexit_p(e1000_remove),
 #ifdef CONFIG_PM
-	/* Power Managment Hooks */
+	/* Power Management Hooks */
 	.suspend  = e1000_suspend,
 	.resume   = e1000_resume,
 #endif
@@ -4127,7 +4288,7 @@ static int __init e1000_init_module(void)
 	int ret;
 	printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n",
 	       e1000e_driver_name, e1000e_driver_version);
-	printk(KERN_INFO "%s: Copyright (c) 1999-2007 Intel Corporation.\n",
+	printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n",
 	       e1000e_driver_name);
 	ret = pci_register_driver(&e1000_driver);
 

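Editor's note on the e1000e/netdev.c hunks above: besides the multi-line comment reflow, they track a reshuffle of struct e1000_hw (media_type now lives under hw.phy, the flow-control fields move into an hw.fc sub-struct, phy.wait_for_link becomes phy.autoneg_wait_to_complete, the board hook get_invariants() becomes get_variants(), and the part-number helper becomes e1000e_read_pba_num()). The slab remark in e1000_change_mtu() also checks out arithmetically: a 2048-byte receive buffer plus the 16 bytes reserved by netdev_alloc_skb() and the 2 bytes of NET_IP_ALIGN is 2066 bytes, which no longer fits a size-2048 slab object and therefore comes from the size-4096 slab. The fragment below is only a sketch of the renamed fields as they appear in the hunks, not additional code from the patch:

	/* old name              -> new name (as seen in the hunks above) */
	/* hw.media_type         -> hw.phy.media_type                     */
	/* hw.mac.fc             -> hw.fc.type                            */
	/* hw.mac.original_fc    -> hw.fc.original_type                   */
	/* hw.mac.max_frame_size -> adapter->max_frame_size               */
	/* hw.phy.wait_for_link  -> hw.phy.autoneg_wait_to_complete       */
	adapter->hw.phy.autoneg_wait_to_complete = 0;
	adapter->hw.fc.original_type = e1000_fc_default;
	adapter->hw.fc.type = e1000_fc_default;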
drivers/net/e1000e/param.c (+21, -12)

@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -30,7 +30,8 @@
 
 #include "e1000.h"
 
-/* This is the only thing that needs to be changed to adjust the
+/*
+ * This is the only thing that needs to be changed to adjust the
  * maximum number of ports that the driver can manage.
  */
 
@@ -46,7 +47,8 @@ module_param(copybreak, uint, 0644);
 MODULE_PARM_DESC(copybreak,
 	"Maximum size of packet that is copied to a new buffer on receive");
 
-/* All parameters are treated the same, as an integer array of values.
+/*
+ * All parameters are treated the same, as an integer array of values.
  * This macro just reduces the need to repeat the same declaration code
  * over and over (plus this helps to avoid typo bugs).
  */
@@ -60,8 +62,9 @@ MODULE_PARM_DESC(copybreak,
 	MODULE_PARM_DESC(X, desc);
 
 
-/* Transmit Interrupt Delay in units of 1.024 microseconds
- *  Tx interrupt delay needs to typically be set to something non zero
+/*
+ * Transmit Interrupt Delay in units of 1.024 microseconds
+ * Tx interrupt delay needs to typically be set to something non zero
  *
  * Valid Range: 0-65535
  */
@@ -70,7 +73,8 @@ E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
 #define MAX_TXDELAY 0xFFFF
 #define MIN_TXDELAY 0
 
-/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds
+/*
+ * Transmit Absolute Interrupt Delay in units of 1.024 microseconds
  *
  * Valid Range: 0-65535
  */
@@ -79,8 +83,9 @@ E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
 #define MAX_TXABSDELAY 0xFFFF
 #define MIN_TXABSDELAY 0
 
-/* Receive Interrupt Delay in units of 1.024 microseconds
- *   hardware will likely hang if you set this to anything but zero.
+/*
+ * Receive Interrupt Delay in units of 1.024 microseconds
+ * hardware will likely hang if you set this to anything but zero.
  *
  * Valid Range: 0-65535
  */
@@ -89,7 +94,8 @@ E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
 #define MAX_RXDELAY 0xFFFF
 #define MIN_RXDELAY 0
 
-/* Receive Absolute Interrupt Delay in units of 1.024 microseconds
+/*
+ * Receive Absolute Interrupt Delay in units of 1.024 microseconds
  *
  * Valid Range: 0-65535
  */
@@ -98,7 +104,8 @@ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
 #define MAX_RXABSDELAY 0xFFFF
 #define MIN_RXABSDELAY 0
 
-/* Interrupt Throttle Rate (interrupts/sec)
+/*
+ * Interrupt Throttle Rate (interrupts/sec)
  *
  * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
  */
@@ -107,7 +114,8 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
 #define MAX_ITR 100000
 #define MIN_ITR 100
 
-/* Enable Smart Power Down of the PHY
+/*
+ * Enable Smart Power Down of the PHY
  *
  * Valid Range: 0, 1
  *
@@ -115,7 +123,8 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
  */
 E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
 
-/* Enable Kumeran Lock Loss workaround
+/*
+ * Enable Kumeran Lock Loss workaround
  *
  * Valid Range: 0, 1
  *

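Editor's note on the param.c hunks above: only comment formatting changes here. Because the comments describe the driver's convention of handling every option as a per-adapter integer array, a generic sketch of such a declaration macro is given below, built on the standard module-parameter helpers. EXAMPLE_MAX_NIC, the -1 "unset" value and the macro body itself are assumptions for illustration only; nothing but the MODULE_PARM_DESC() tail of the driver's real E1000_PARAM() macro is visible in the hunks.

#define EXAMPLE_MAX_NIC 32		/* assumed upper bound on ports */

#define EXAMPLE_PARAM(X, desc)						\
	static int X[EXAMPLE_MAX_NIC + 1] =				\
		{ [0 ... EXAMPLE_MAX_NIC] = -1 };			\
	static unsigned int num_##X;					\
	module_param_array_named(X, X, int, &num_##X, 0);		\
	MODULE_PARM_DESC(X, desc)

/* used exactly like the declarations above, for example: */
EXAMPLE_PARAM(TxIntDelay, "Transmit Interrupt Delay");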
drivers/net/e1000e/phy.c (+103, -61)

@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -134,7 +134,8 @@ static s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
 		return -E1000_ERR_PARAM;
 	}
 
-	/* Set up Op-code, Phy Address, and register offset in the MDI
+	/*
+	 * Set up Op-code, Phy Address, and register offset in the MDI
 	 * Control register.  The MAC will take care of interfacing with the
 	 * PHY to retrieve the desired data.
 	 */
@@ -144,7 +145,11 @@ static s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
 
 	ew32(MDIC, mdic);
 
-	/* Poll the ready bit to see if the MDI read completed */
+	/*
+	 * Poll the ready bit to see if the MDI read completed
+	 * Increasing the time out as testing showed failures with
+	 * the lower time out
+	 */
 	for (i = 0; i < 64; i++) {
 		udelay(50);
 		mdic = er32(MDIC);
@@ -182,7 +187,8 @@ static s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
 		return -E1000_ERR_PARAM;
 	}
 
-	/* Set up Op-code, Phy Address, and register offset in the MDI
+	/*
+	 * Set up Op-code, Phy Address, and register offset in the MDI
 	 * Control register.  The MAC will take care of interfacing with the
 	 * PHY to retrieve the desired data.
 	 */
@@ -409,14 +415,15 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
 	s32 ret_val;
 	u16 phy_data;
 
-	/* Enable CRS on TX. This must be set for half-duplex operation. */
+	/* Enable CRS on Tx. This must be set for half-duplex operation. */
 	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
 	if (ret_val)
 		return ret_val;
 
 	phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
 
-	/* Options:
+	/*
+	 * Options:
 	 *   MDI/MDI-X = 0 (default)
 	 *   0 - Auto for all speeds
 	 *   1 - MDI mode
@@ -441,7 +448,8 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
 		break;
 	}
 
-	/* Options:
+	/*
+	 * Options:
 	 *   disable_polarity_correction = 0 (default)
 	 *       Automatic Correction for Reversed Cable Polarity
 	 *   0 - Disabled
@@ -456,7 +464,8 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
 		return ret_val;
 
 	if (phy->revision < 4) {
-		/* Force TX_CLK in the Extended PHY Specific Control Register
+		/*
+		 * Force TX_CLK in the Extended PHY Specific Control Register
 		 * to 25MHz clock.
 		 */
 		ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
@@ -543,19 +552,21 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
 
 	/* set auto-master slave resolution settings */
 	if (hw->mac.autoneg) {
-		/* when autonegotiation advertisement is only 1000Mbps then we
+		/*
+		 * when autonegotiation advertisement is only 1000Mbps then we
 		 * should disable SmartSpeed and enable Auto MasterSlave
-		 * resolution as hardware default. */
+		 * resolution as hardware default.
+		 */
 		if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
 			/* Disable SmartSpeed */
 			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
-						     &data);
+					   &data);
 			if (ret_val)
 				return ret_val;
 
 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
 			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
-						     data);
+					   data);
 			if (ret_val)
 				return ret_val;
 
@@ -630,14 +641,16 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
 			return ret_val;
 	}
 
-	/* Need to parse both autoneg_advertised and fc and set up
+	/*
+	 * Need to parse both autoneg_advertised and fc and set up
 	 * the appropriate PHY registers.  First we will parse for
 	 * autoneg_advertised software override.  Since we can advertise
 	 * a plethora of combinations, we need to check each bit
 	 * individually.
 	 */
 
-	/* First we clear all the 10/100 mb speed bits in the Auto-Neg
+	/*
+	 * First we clear all the 10/100 mb speed bits in the Auto-Neg
 	 * Advertisement Register (Address 4) and the 1000 mb speed bits in
 	 * the  1000Base-T Control Register (Address 9).
 	 */
@@ -683,7 +696,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
 		mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
 	}
 
-	/* Check for a software override of the flow control settings, and
+	/*
+	 * Check for a software override of the flow control settings, and
 	 * setup the PHY advertisement registers accordingly.  If
 	 * auto-negotiation is enabled, then software will have to set the
 	 * "PAUSE" bits to the correct value in the Auto-Negotiation
@@ -696,38 +710,42 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
 	 *	  but not send pause frames).
 	 *      2:  Tx flow control is enabled (we can send pause frames
 	 *	  but we do not support receiving pause frames).
-	 *      3:  Both Rx and TX flow control (symmetric) are enabled.
+	 *      3:  Both Rx and Tx flow control (symmetric) are enabled.
 	 *  other:  No software override.  The flow control configuration
 	 *	  in the EEPROM is used.
 	 */
-	switch (hw->mac.fc) {
+	switch (hw->fc.type) {
 	case e1000_fc_none:
-		/* Flow control (RX & TX) is completely disabled by a
+		/*
+		 * Flow control (Rx & Tx) is completely disabled by a
 		 * software over-ride.
 		 */
 		mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
 		break;
 	case e1000_fc_rx_pause:
-		/* RX Flow control is enabled, and TX Flow control is
+		/*
+		 * Rx Flow control is enabled, and Tx Flow control is
 		 * disabled, by a software over-ride.
-		 */
-		/* Since there really isn't a way to advertise that we are
-		 * capable of RX Pause ONLY, we will advertise that we
-		 * support both symmetric and asymmetric RX PAUSE.  Later
+		 *
+		 * Since there really isn't a way to advertise that we are
+		 * capable of Rx Pause ONLY, we will advertise that we
+		 * support both symmetric and asymmetric Rx PAUSE.  Later
 		 * (in e1000e_config_fc_after_link_up) we will disable the
 		 * hw's ability to send PAUSE frames.
 		 */
 		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
 		break;
 	case e1000_fc_tx_pause:
-		/* TX Flow control is enabled, and RX Flow control is
+		/*
+		 * Tx Flow control is enabled, and Rx Flow control is
 		 * disabled, by a software over-ride.
 		 */
 		mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
 		mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
 		break;
 	case e1000_fc_full:
-		/* Flow control (both RX and TX) is enabled by a software
+		/*
+		 * Flow control (both Rx and Tx) is enabled by a software
 		 * over-ride.
 		 */
 		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
@@ -758,7 +776,7 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
  *  Performs initial bounds checking on autoneg advertisement parameter, then
  *  configure to advertise the full capability.  Setup the PHY to autoneg
  *  and restart the negotiation process between the link partner.  If
- *  wait_for_link, then wait for autoneg to complete before exiting.
+ *  autoneg_wait_to_complete, then wait for autoneg to complete before exiting.
  **/
 static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
 {
@@ -766,12 +784,14 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
 	s32 ret_val;
 	u16 phy_ctrl;
 
-	/* Perform some bounds checking on the autoneg advertisement
+	/*
+	 * Perform some bounds checking on the autoneg advertisement
 	 * parameter.
 	 */
 	phy->autoneg_advertised &= phy->autoneg_mask;
 
-	/* If autoneg_advertised is zero, we assume it was not defaulted
+	/*
+	 * If autoneg_advertised is zero, we assume it was not defaulted
 	 * by the calling code so we set to advertise full capability.
 	 */
 	if (phy->autoneg_advertised == 0)
@@ -785,7 +805,8 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
 	}
 	hw_dbg(hw, "Restarting Auto-Neg\n");
 
-	/* Restart auto-negotiation by setting the Auto Neg Enable bit and
+	/*
+	 * Restart auto-negotiation by setting the Auto Neg Enable bit and
 	 * the Auto Neg Restart bit in the PHY control register.
 	 */
 	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
@@ -797,10 +818,11 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
 	if (ret_val)
 		return ret_val;
 
-	/* Does the user want to wait for Auto-Neg to complete here, or
+	/*
+	 * Does the user want to wait for Auto-Neg to complete here, or
 	 * check at a later time (for example, callback routine).
 	 */
-	if (phy->wait_for_link) {
+	if (phy->autoneg_wait_to_complete) {
 		ret_val = e1000_wait_autoneg(hw);
 		if (ret_val) {
 			hw_dbg(hw, "Error while waiting for "
@@ -829,14 +851,18 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw)
 	bool link;
 
 	if (hw->mac.autoneg) {
-		/* Setup autoneg and flow control advertisement and perform
-		 * autonegotiation. */
+		/*
+		 * Setup autoneg and flow control advertisement and perform
+		 * autonegotiation.
+		 */
 		ret_val = e1000_copper_link_autoneg(hw);
 		if (ret_val)
 			return ret_val;
 	} else {
-		/* PHY will be set to 10H, 10F, 100H or 100F
-		 * depending on user settings. */
+		/*
+		 * PHY will be set to 10H, 10F, 100H or 100F
+		 * depending on user settings.
+		 */
 		hw_dbg(hw, "Forcing Speed and Duplex\n");
 		ret_val = e1000_phy_force_speed_duplex(hw);
 		if (ret_val) {
@@ -845,7 +871,8 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw)
 		}
 	}
 
-	/* Check link status. Wait up to 100 microseconds for link to become
+	/*
+	 * Check link status. Wait up to 100 microseconds for link to become
 	 * valid.
 	 */
 	ret_val = e1000e_phy_has_link_generic(hw,
@@ -891,7 +918,8 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
 	if (ret_val)
 		return ret_val;
 
-	/* Clear Auto-Crossover to force MDI manually.  IGP requires MDI
+	/*
+	 * Clear Auto-Crossover to force MDI manually.  IGP requires MDI
 	 * forced whenever speed and duplex are forced.
 	 */
 	ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
@@ -909,7 +937,7 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
 
 	udelay(1);
 
-	if (phy->wait_for_link) {
+	if (phy->autoneg_wait_to_complete) {
 		hw_dbg(hw, "Waiting for forced speed/duplex link on IGP phy.\n");
 
 		ret_val = e1000e_phy_has_link_generic(hw,
@@ -941,7 +969,7 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
  *  Calls the PHY setup function to force speed and duplex.  Clears the
  *  auto-crossover to force MDI manually.  Resets the PHY to commit the
  *  changes.  If time expires while waiting for link up, we reset the DSP.
- *  After reset, TX_CLK and CRS on TX must be set.  Return successful upon
+ *  After reset, TX_CLK and CRS on Tx must be set.  Return successful upon
  *  successful completion, else return corresponding error code.
  **/
 s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
@@ -951,7 +979,8 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
 	u16 phy_data;
 	bool link;
 
-	/* Clear Auto-Crossover to force MDI manually.  M88E1000 requires MDI
+	/*
+	 * Clear Auto-Crossover to force MDI manually.  M88E1000 requires MDI
 	 * forced whenever speed and duplex are forced.
 	 */
 	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -980,7 +1009,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
 
 	udelay(1);
 
-	if (phy->wait_for_link) {
+	if (phy->autoneg_wait_to_complete) {
 		hw_dbg(hw, "Waiting for forced speed/duplex link on M88 phy.\n");
 
 		ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
@@ -989,10 +1018,12 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
 			return ret_val;
 
 		if (!link) {
-			/* We didn't get link.
+			/*
+			 * We didn't get link.
 			 * Reset the DSP and cross our fingers.
 			 */
-			ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT, 0x001d);
+			ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT,
+					   0x001d);
 			if (ret_val)
 				return ret_val;
 			ret_val = e1000e_phy_reset_dsp(hw);
@@ -1011,7 +1042,8 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
 	if (ret_val)
 		return ret_val;
 
-	/* Resetting the phy means we need to re-force TX_CLK in the
+	/*
+	 * Resetting the phy means we need to re-force TX_CLK in the
 	 * Extended PHY Specific Control Register to 25MHz clock from
 	 * the reset value of 2.5MHz.
 	 */
@@ -1020,7 +1052,8 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
 	if (ret_val)
 		return ret_val;
 
-	/* In addition, we must re-enable CRS on Tx for both half and full
+	/*
+	 * In addition, we must re-enable CRS on Tx for both half and full
 	 * duplex.
 	 */
 	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -1051,7 +1084,7 @@ void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
 	u32 ctrl;
 
 	/* Turn off flow control when forcing speed/duplex */
-	mac->fc = e1000_fc_none;
+	hw->fc.type = e1000_fc_none;
 
 	/* Force speed/duplex on the mac */
 	ctrl = er32(CTRL);
@@ -1124,30 +1157,32 @@ s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active)
 					     data);
 		if (ret_val)
 			return ret_val;
-		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		/*
+		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
 		 * during Dx states where the power conservation is most
 		 * important.  During driver activity we should enable
-		 * SmartSpeed, so performance is maintained. */
+		 * SmartSpeed, so performance is maintained.
+		 */
 		if (phy->smart_speed == e1000_smart_speed_on) {
 			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
-						    &data);
+					   &data);
 			if (ret_val)
 				return ret_val;
 
 			data |= IGP01E1000_PSCFR_SMART_SPEED;
 			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
-						     data);
+					   data);
 			if (ret_val)
 				return ret_val;
 		} else if (phy->smart_speed == e1000_smart_speed_off) {
 			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
-						     &data);
+					   &data);
 			if (ret_val)
 				return ret_val;
 
 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
 			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
-						     data);
+					   data);
 			if (ret_val)
 				return ret_val;
 		}
@@ -1249,8 +1284,10 @@ static s32 e1000_check_polarity_igp(struct e1000_hw *hw)
 	s32 ret_val;
 	u16 data, offset, mask;
 
-	/* Polarity is determined based on the speed of
-	 * our connection. */
+	/*
+	 * Polarity is determined based on the speed of
+	 * our connection.
+	 */
 	ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data);
 	if (ret_val)
 		return ret_val;
@@ -1260,7 +1297,8 @@ static s32 e1000_check_polarity_igp(struct e1000_hw *hw)
 		offset	= IGP01E1000_PHY_PCS_INIT_REG;
 		mask	= IGP01E1000_PHY_POLARITY_MASK;
 	} else {
-		/* This really only applies to 10Mbps since
+		/*
+		 * This really only applies to 10Mbps since
 		 * there is no polarity for 100Mbps (always 0).
 		 */
 		offset	= IGP01E1000_PHY_PORT_STATUS;
@@ -1278,7 +1316,7 @@ static s32 e1000_check_polarity_igp(struct e1000_hw *hw)
 }
 
 /**
- *  e1000_wait_autoneg - Wait for auto-neg compeletion
+ *  e1000_wait_autoneg - Wait for auto-neg completion
  *  @hw: pointer to the HW structure
  *
  *  Waits for auto-negotiation to complete or for the auto-negotiation time
@@ -1302,7 +1340,8 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
 		msleep(100);
 	}
 
-	/* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
+	/*
+	 * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
 	 * has completed.
 	 */
 	return ret_val;
@@ -1324,7 +1363,8 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
 	u16 i, phy_status;
 
 	for (i = 0; i < iterations; i++) {
-		/* Some PHYs require the PHY_STATUS register to be read
+		/*
+		 * Some PHYs require the PHY_STATUS register to be read
 		 * twice due to the link bit being sticky.  No harm doing
 		 * it across the board.
 		 */
@@ -1412,10 +1452,12 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
 		if (ret_val)
 			return ret_val;
 
-		/* Getting bits 15:9, which represent the combination of
+		/*
+		 * Getting bits 15:9, which represent the combination of
 		 * course and fine gain values.  The result is a number
 		 * that can be put into the lookup table to obtain the
-		 * approximate cable length. */
+		 * approximate cable length.
+		 */
 		cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
 				IGP02E1000_AGC_LENGTH_MASK;
 
@@ -1466,7 +1508,7 @@ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw)
 	u16 phy_data;
 	bool link;
 
-	if (hw->media_type != e1000_media_type_copper) {
+	if (hw->phy.media_type != e1000_media_type_copper) {
 		hw_dbg(hw, "Phy info is only valid for copper media\n");
 		return -E1000_ERR_CONFIG;
 	}

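Editor's note on the phy.c hunks above: apart from comment reflow they carry the same renames as netdev.c (hw->fc.type, phy->autoneg_wait_to_complete, hw->phy.media_type) plus a longer MDIC poll loop. The comment block in e1000_phy_setup_autoneg() maps the requested flow-control mode onto the two PAUSE bits advertised to the link partner; the sketch below condenses that mapping using the identifiers from the hunks and is not a substitute for the driver's switch. Rx-only pause cannot be advertised by itself, so e1000_fc_rx_pause advertises both bits and relies on e1000e_config_fc_after_link_up() to stop the hardware from sending PAUSE frames, as the in-code comment explains.

	switch (hw->fc.type) {
	case e1000_fc_none:
		/* no PAUSE in either direction */
		mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
		break;
	case e1000_fc_rx_pause:	/* Rx only: advertise both, gate Tx later */
	case e1000_fc_full:	/* symmetric Rx and Tx */
		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
		break;
	case e1000_fc_tx_pause:
		/* Tx only: asymmetric direction bit without plain PAUSE */
		mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
		mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
		break;
	default:
		/* no software override: the EEPROM configuration is used */
		break;
	}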
drivers/net/ehea/ehea.h (+3, -3)

@@ -422,7 +422,7 @@ struct ehea_fw_handle_entry {
 struct ehea_fw_handle_array {
 	struct ehea_fw_handle_entry *arr;
 	int num_entries;
-	struct semaphore lock;
+	struct mutex lock;
 };
 
 struct ehea_bcmc_reg_entry {
@@ -435,7 +435,7 @@ struct ehea_bcmc_reg_entry {
 struct ehea_bcmc_reg_array {
 	struct ehea_bcmc_reg_entry *arr;
 	int num_entries;
-	struct semaphore lock;
+	struct mutex lock;
 };
 
 #define EHEA_PORT_UP 1
@@ -453,7 +453,7 @@ struct ehea_port {
 	struct vlan_group *vgrp;
 	struct ehea_eq *qp_eq;
 	struct work_struct reset_task;
-	struct semaphore port_lock;
+	struct mutex port_lock;
 	char int_aff_name[EHEA_IRQ_NAME_SIZE];
 	int allmulti;			 /* Indicates IFF_ALLMULTI state */
 	int promisc;		 	 /* Indicates IFF_PROMISC state */

drivers/net/ehea/ehea_main.c (+47, -45)

@@ -36,6 +36,7 @@
 #include <linux/notifier.h>
 #include <linux/reboot.h>
 #include <asm/kexec.h>
+#include <linux/mutex.h>
 
 #include <net/ip.h>
 
@@ -99,7 +100,7 @@ static int port_name_cnt;
 static LIST_HEAD(adapter_list);
 u64 ehea_driver_flags;
 struct work_struct ehea_rereg_mr_task;
-struct semaphore dlpar_mem_lock;
+static DEFINE_MUTEX(dlpar_mem_lock);
 struct ehea_fw_handle_array ehea_fw_handles;
 struct ehea_bcmc_reg_array ehea_bcmc_regs;
 
@@ -1761,7 +1762,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
 
 	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
 
-	down(&ehea_bcmc_regs.lock);
+	mutex_lock(&ehea_bcmc_regs.lock);
 
 	/* Deregister old MAC in pHYP */
 	ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
@@ -1779,7 +1780,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
 
 out_upregs:
 	ehea_update_bcmc_registrations();
-	up(&ehea_bcmc_regs.lock);
+	mutex_unlock(&ehea_bcmc_regs.lock);
 out_free:
 	kfree(cb0);
 out:
@@ -1941,7 +1942,7 @@ static void ehea_set_multicast_list(struct net_device *dev)
 	}
 	ehea_promiscuous(dev, 0);
 
-	down(&ehea_bcmc_regs.lock);
+	mutex_lock(&ehea_bcmc_regs.lock);
 
 	if (dev->flags & IFF_ALLMULTI) {
 		ehea_allmulti(dev, 1);
@@ -1972,7 +1973,7 @@ static void ehea_set_multicast_list(struct net_device *dev)
 	}
 out:
 	ehea_update_bcmc_registrations();
-	up(&ehea_bcmc_regs.lock);
+	mutex_unlock(&ehea_bcmc_regs.lock);
 	return;
 }
 
@@ -2455,7 +2456,7 @@ static int ehea_up(struct net_device *dev)
 	if (port->state == EHEA_PORT_UP)
 		return 0;
 
-	down(&ehea_fw_handles.lock);
+	mutex_lock(&ehea_fw_handles.lock);
 
 	ret = ehea_port_res_setup(port, port->num_def_qps,
 				  port->num_add_tx_qps);
@@ -2493,7 +2494,7 @@ static int ehea_up(struct net_device *dev)
 		}
 	}
 
-	down(&ehea_bcmc_regs.lock);
+	mutex_lock(&ehea_bcmc_regs.lock);
 
 	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
 	if (ret) {
@@ -2516,10 +2517,10 @@ out:
 		ehea_info("Failed starting %s. ret=%i", dev->name, ret);
 
 	ehea_update_bcmc_registrations();
-	up(&ehea_bcmc_regs.lock);
+	mutex_unlock(&ehea_bcmc_regs.lock);
 
 	ehea_update_firmware_handles();
-	up(&ehea_fw_handles.lock);
+	mutex_unlock(&ehea_fw_handles.lock);
 
 	return ret;
 }
@@ -2545,7 +2546,7 @@ static int ehea_open(struct net_device *dev)
 	int ret;
 	struct ehea_port *port = netdev_priv(dev);
 
-	down(&port->port_lock);
+	mutex_lock(&port->port_lock);
 
 	if (netif_msg_ifup(port))
 		ehea_info("enabling port %s", dev->name);
@@ -2556,7 +2557,7 @@ static int ehea_open(struct net_device *dev)
 		netif_start_queue(dev);
 	}
 
-	up(&port->port_lock);
+	mutex_unlock(&port->port_lock);
 
 	return ret;
 }
@@ -2569,18 +2570,18 @@ static int ehea_down(struct net_device *dev)
 	if (port->state == EHEA_PORT_DOWN)
 		return 0;
 
-	down(&ehea_bcmc_regs.lock);
+	mutex_lock(&ehea_fw_handles.lock);
+
+	mutex_lock(&ehea_bcmc_regs.lock);
 	ehea_drop_multicast_list(dev);
 	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
 
 	ehea_free_interrupts(dev);
 
-	down(&ehea_fw_handles.lock);
-
 	port->state = EHEA_PORT_DOWN;
 
 	ehea_update_bcmc_registrations();
-	up(&ehea_bcmc_regs.lock);
+	mutex_unlock(&ehea_bcmc_regs.lock);
 
 	ret = ehea_clean_all_portres(port);
 	if (ret)
@@ -2588,7 +2589,7 @@ static int ehea_down(struct net_device *dev)
 			  dev->name, ret);
 
 	ehea_update_firmware_handles();
-	up(&ehea_fw_handles.lock);
+	mutex_unlock(&ehea_fw_handles.lock);
 
 	return ret;
 }
@@ -2602,11 +2603,11 @@ static int ehea_stop(struct net_device *dev)
 		ehea_info("disabling port %s", dev->name);
 
 	flush_scheduled_work();
-	down(&port->port_lock);
+	mutex_lock(&port->port_lock);
 	netif_stop_queue(dev);
 	port_napi_disable(port);
 	ret = ehea_down(dev);
-	up(&port->port_lock);
+	mutex_unlock(&port->port_lock);
 	return ret;
 }
 
@@ -2820,7 +2821,7 @@ static void ehea_reset_port(struct work_struct *work)
 	struct net_device *dev = port->netdev;
 
 	port->resets++;
-	down(&port->port_lock);
+	mutex_lock(&port->port_lock);
 	netif_stop_queue(dev);
 
 	port_napi_disable(port);
@@ -2840,7 +2841,7 @@ static void ehea_reset_port(struct work_struct *work)
 
 	netif_wake_queue(dev);
 out:
-	up(&port->port_lock);
+	mutex_unlock(&port->port_lock);
 	return;
 }
 
@@ -2849,7 +2850,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
 	int ret, i;
 	struct ehea_adapter *adapter;
 
-	down(&dlpar_mem_lock);
+	mutex_lock(&dlpar_mem_lock);
 	ehea_info("LPAR memory enlarged - re-initializing driver");
 
 	list_for_each_entry(adapter, &adapter_list, list)
@@ -2857,22 +2858,24 @@ static void ehea_rereg_mrs(struct work_struct *work)
 			/* Shutdown all ports */
 			for (i = 0; i < EHEA_MAX_PORTS; i++) {
 				struct ehea_port *port = adapter->port[i];
+				struct net_device *dev;
 
-				if (port) {
-					struct net_device *dev = port->netdev;
+				if (!port)
+					continue;
 
-					if (dev->flags & IFF_UP) {
-						down(&port->port_lock);
-						netif_stop_queue(dev);
-						ehea_flush_sq(port);
-						ret = ehea_stop_qps(dev);
-						if (ret) {
-							up(&port->port_lock);
-							goto out;
-						}
-						port_napi_disable(port);
-						up(&port->port_lock);
+				dev = port->netdev;
+
+				if (dev->flags & IFF_UP) {
+					mutex_lock(&port->port_lock);
+					netif_stop_queue(dev);
+					ehea_flush_sq(port);
+					ret = ehea_stop_qps(dev);
+					if (ret) {
+						mutex_unlock(&port->port_lock);
+						goto out;
 					}
+					port_napi_disable(port);
+					mutex_unlock(&port->port_lock);
 				}
 			}
 
@@ -2912,17 +2915,17 @@ static void ehea_rereg_mrs(struct work_struct *work)
 					struct net_device *dev = port->netdev;
 
 					if (dev->flags & IFF_UP) {
-						down(&port->port_lock);
+						mutex_lock(&port->port_lock);
 						port_napi_enable(port);
 						ret = ehea_restart_qps(dev);
 						if (!ret)
 							netif_wake_queue(dev);
-						up(&port->port_lock);
+						mutex_unlock(&port->port_lock);
 					}
 				}
 			}
 		}
-       up(&dlpar_mem_lock);
+       mutex_unlock(&dlpar_mem_lock);
        ehea_info("re-initializing driver complete");
 out:
 	return;
@@ -3083,7 +3086,7 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
 
 	port = netdev_priv(dev);
 
-	sema_init(&port->port_lock, 1);
+	mutex_init(&port->port_lock);
 	port->state = EHEA_PORT_DOWN;
 	port->sig_comp_iv = sq_entries / 10;
 
@@ -3362,7 +3365,7 @@ static int __devinit ehea_probe_adapter(struct of_device *dev,
 		ehea_error("Invalid ibmebus device probed");
 		return -EINVAL;
 	}
-	down(&ehea_fw_handles.lock);
+	mutex_lock(&ehea_fw_handles.lock);
 
 	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
 	if (!adapter) {
@@ -3446,7 +3449,7 @@ out_free_ad:
 
 out:
 	ehea_update_firmware_handles();
-	up(&ehea_fw_handles.lock);
+	mutex_unlock(&ehea_fw_handles.lock);
 	return ret;
 }
 
@@ -3465,7 +3468,7 @@ static int __devexit ehea_remove(struct of_device *dev)
 
 	flush_scheduled_work();
 
-	down(&ehea_fw_handles.lock);
+	mutex_lock(&ehea_fw_handles.lock);
 
 	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
 	tasklet_kill(&adapter->neq_tasklet);
@@ -3476,7 +3479,7 @@ static int __devexit ehea_remove(struct of_device *dev)
 	kfree(adapter);
 
 	ehea_update_firmware_handles();
-	up(&ehea_fw_handles.lock);
+	mutex_unlock(&ehea_fw_handles.lock);
 
 	return 0;
 }
@@ -3563,9 +3566,8 @@ int __init ehea_module_init(void)
 	memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
 	memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
 
-	sema_init(&dlpar_mem_lock, 1);
-	sema_init(&ehea_fw_handles.lock, 1);
-	sema_init(&ehea_bcmc_regs.lock, 1);
+	mutex_init(&ehea_fw_handles.lock);
+	mutex_init(&ehea_bcmc_regs.lock);
 
 	ret = check_module_parm();
 	if (ret)

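Editor's note on the ehea hunks above: ehea.h converts the binary semaphores embedded in the firmware-handle, BCMC-registration and port structures into mutexes, and ehea_main.c follows suit, including <linux/mutex.h>, turning every down()/up() pair into mutex_lock()/mutex_unlock(), and replacing sema_init(&lock, 1) with mutex_init() or a static DEFINE_MUTEX() (dlpar_mem_lock also becomes static). ehea_down() now takes ehea_fw_handles.lock before ehea_bcmc_regs.lock, matching the order used in ehea_up(), and the port-shutdown loop in ehea_rereg_mrs() is flattened with an early continue. A minimal sketch of the conversion pattern, with example_lock and example_update() as hypothetical stand-ins for the driver's own names:

#include <linux/mutex.h>

/* was: struct semaphore example_lock; ... sema_init(&example_lock, 1); */
static DEFINE_MUTEX(example_lock);

static void example_update(void)
{
	mutex_lock(&example_lock);	/* was: down(&example_lock) */
	/* ... touch the shared registration tables ... */
	mutex_unlock(&example_lock);	/* was: up(&example_lock) */
}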
drivers/net/fec_mpc52xx.c (+1, -1)

@@ -198,7 +198,7 @@ static int mpc52xx_fec_init_phy(struct net_device *dev)
 	struct phy_device *phydev;
 	char phy_id[BUS_ID_SIZE];
 
-	snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT,
+	snprintf(phy_id, BUS_ID_SIZE, "%x:%02x",
 			(unsigned int)dev->base_addr, priv->phy_addr);
 
 	priv->link = PHY_DOWN;

drivers/net/fec_mpc52xx_phy.c (+1, -1)

@@ -124,7 +124,7 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of, const struct of_device_i
 		goto out_free;
 	}
 
-	bus->id = res.start;
+	snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
 	bus->priv = priv;
 
 	bus->dev = dev;

drivers/net/forcedeth.c (+78, -162)

@@ -29,90 +29,6 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  *
- * Changelog:
- * 	0.01: 05 Oct 2003: First release that compiles without warnings.
- * 	0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs.
- * 			   Check all PCI BARs for the register window.
- * 			   udelay added to mii_rw.
- * 	0.03: 06 Oct 2003: Initialize dev->irq.
- * 	0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
- * 	0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout.
- * 	0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
- * 			   irq mask updated
- * 	0.07: 14 Oct 2003: Further irq mask updates.
- * 	0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill
- * 			   added into irq handler, NULL check for drain_ring.
- * 	0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
- * 			   requested interrupt sources.
- * 	0.10: 20 Oct 2003: First cleanup for release.
- * 	0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
- * 			   MAC Address init fix, set_multicast cleanup.
- * 	0.12: 23 Oct 2003: Cleanups for release.
- * 	0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
- * 			   Set link speed correctly. start rx before starting
- * 			   tx (nv_start_rx sets the link speed).
- * 	0.14: 25 Oct 2003: Nic dependant irq mask.
- * 	0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during
- * 			   open.
- * 	0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size
- * 			   increased to 1628 bytes.
- * 	0.17: 16 Nov 2003: undo rx buffer size increase. Substract 1 from
- * 			   the tx length.
- * 	0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats
- * 	0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac
- * 			   addresses, really stop rx if already running
- * 			   in nv_start_rx, clean up a bit.
- * 	0.20: 07 Dec 2003: alloc fixes
- * 	0.21: 12 Jan 2004: additional alloc fix, nic polling fix.
- *	0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup
- *			   on close.
- *	0.23: 26 Jan 2004: various small cleanups
- *	0.24: 27 Feb 2004: make driver even less anonymous in backtraces
- *	0.25: 09 Mar 2004: wol support
- *	0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes
- *	0.27: 19 Jun 2004: Gigabit support, new descriptor rings,
- *			   added CK804/MCP04 device IDs, code fixes
- *			   for registers, link status and other minor fixes.
- *	0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe
- *	0.29: 31 Aug 2004: Add backup timer for link change notification.
- *	0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
- *			   into nv_close, otherwise reenabling for wol can
- *			   cause DMA to kfree'd memory.
- *	0.31: 14 Nov 2004: ethtool support for getting/setting link
- *			   capabilities.
- *	0.32: 16 Apr 2005: RX_ERROR4 handling added.
- *	0.33: 16 May 2005: Support for MCP51 added.
- *	0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
- *	0.35: 26 Jun 2005: Support for MCP55 added.
- *	0.36: 28 Jun 2005: Add jumbo frame support.
- *	0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
- *	0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
- *			   per-packet flags.
- *	0.39: 18 Jul 2005: Add 64bit descriptor support.
- *	0.40: 19 Jul 2005: Add support for mac address change.
- *	0.41: 30 Jul 2005: Write back original MAC in nv_close instead
- *			   of nv_remove
- *	0.42: 06 Aug 2005: Fix lack of link speed initialization
- *			   in the second (and later) nv_open call
- *	0.43: 10 Aug 2005: Add support for tx checksum.
- *	0.44: 20 Aug 2005: Add support for scatter gather and segmentation.
- *	0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check
- *	0.46: 20 Oct 2005: Add irq optimization modes.
- *	0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
- *	0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
- *	0.49: 10 Dec 2005: Fix tso for large buffers.
- *	0.50: 20 Jan 2006: Add 8021pq tagging support.
- *	0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
- *	0.52: 20 Jan 2006: Add MSI/MSIX support.
- *	0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
- *	0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
- *	0.55: 22 Mar 2006: Add flow control (pause frame).
- *	0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
- *	0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
- *	0.58: 30 Oct 2006: Added support for sideband management unit.
- *	0.59: 30 Oct 2006: Added support for recoverable error.
- *	0.60: 20 Jan 2007: Code optimizations for rings, rx & tx data paths, and stats.
- *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
  * This means recovery from netif_stop_queue only happens if the hw timer
@@ -123,11 +39,6 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#ifdef CONFIG_FORCEDETH_NAPI
-#define DRIVERNAPI "-NAPI"
-#else
-#define DRIVERNAPI
-#endif
 #define FORCEDETH_VERSION		"0.61"
 #define DRV_NAME			"forcedeth"
 
@@ -930,6 +841,13 @@ static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
 	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
 }
 
+static bool nv_optimized(struct fe_priv *np)
+{
+	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+		return false;
+	return true;
+}
+
 static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
 				int delay, int delaymax, const char *msg)
 {
@@ -966,7 +884,7 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
 	struct fe_priv *np = get_nvpriv(dev);
 	u8 __iomem *base = get_hwbase(dev);
 
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+	if (!nv_optimized(np)) {
 		if (rxtx_flags & NV_SETUP_RX_RING) {
 			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
 		}
@@ -989,7 +907,7 @@ static void free_rings(struct net_device *dev)
 {
 	struct fe_priv *np = get_nvpriv(dev);
 
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+	if (!nv_optimized(np)) {
 		if (np->rx_ring.orig)
 			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
 					    np->rx_ring.orig, np->ring_addr);
@@ -1435,6 +1353,18 @@ static void nv_stop_tx(struct net_device *dev)
 		       base + NvRegTransmitPoll);
 }
 
+static void nv_start_rxtx(struct net_device *dev)
+{
+	nv_start_rx(dev);
+	nv_start_tx(dev);
+}
+
+static void nv_stop_rxtx(struct net_device *dev)
+{
+	nv_stop_rx(dev);
+	nv_stop_tx(dev);
+}
+
 static void nv_txrx_reset(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
@@ -1657,7 +1587,7 @@ static void nv_do_rx_refill(unsigned long data)
 	} else {
 		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 	}
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+	if (!nv_optimized(np))
 		retcode = nv_alloc_rx(dev);
 	else
 		retcode = nv_alloc_rx_optimized(dev);
@@ -1682,8 +1612,10 @@ static void nv_init_rx(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	int i;
+
 	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+
+	if (!nv_optimized(np))
 		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
 	else
 		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
@@ -1691,7 +1623,7 @@ static void nv_init_rx(struct net_device *dev)
 	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
 
 	for (i = 0; i < np->rx_ring_size; i++) {
-		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+		if (!nv_optimized(np)) {
 			np->rx_ring.orig[i].flaglen = 0;
 			np->rx_ring.orig[i].buf = 0;
 		} else {
@@ -1709,8 +1641,10 @@ static void nv_init_tx(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	int i;
+
 	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+
+	if (!nv_optimized(np))
 		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
 	else
 		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
@@ -1721,7 +1655,7 @@ static void nv_init_tx(struct net_device *dev)
 	np->tx_end_flip = NULL;
 
 	for (i = 0; i < np->tx_ring_size; i++) {
-		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+		if (!nv_optimized(np)) {
 			np->tx_ring.orig[i].flaglen = 0;
 			np->tx_ring.orig[i].buf = 0;
 		} else {
@@ -1744,7 +1678,8 @@ static int nv_init_ring(struct net_device *dev)
 
 	nv_init_tx(dev);
 	nv_init_rx(dev);
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+
+	if (!nv_optimized(np))
 		return nv_alloc_rx(dev);
 	else
 		return nv_alloc_rx_optimized(dev);
@@ -1775,7 +1710,7 @@ static void nv_drain_tx(struct net_device *dev)
 	unsigned int i;
 
 	for (i = 0; i < np->tx_ring_size; i++) {
-		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+		if (!nv_optimized(np)) {
 			np->tx_ring.orig[i].flaglen = 0;
 			np->tx_ring.orig[i].buf = 0;
 		} else {
@@ -1802,7 +1737,7 @@ static void nv_drain_rx(struct net_device *dev)
 	int i;
 
 	for (i = 0; i < np->rx_ring_size; i++) {
-		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+		if (!nv_optimized(np)) {
 			np->rx_ring.orig[i].flaglen = 0;
 			np->rx_ring.orig[i].buf = 0;
 		} else {
@@ -1823,7 +1758,7 @@ static void nv_drain_rx(struct net_device *dev)
 	}
 }
 
-static void drain_ring(struct net_device *dev)
+static void nv_drain_rxtx(struct net_device *dev)
 {
 	nv_drain_tx(dev);
 	nv_drain_rx(dev);
@@ -2260,7 +2195,7 @@ static void nv_tx_timeout(struct net_device *dev)
 		}
 		printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
 		for (i=0;i<np->tx_ring_size;i+= 4) {
-			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+			if (!nv_optimized(np)) {
 				printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
 				       i,
 				       le32_to_cpu(np->tx_ring.orig[i].buf),
@@ -2296,7 +2231,7 @@ static void nv_tx_timeout(struct net_device *dev)
 	nv_stop_tx(dev);
 
 	/* 2) check that the packets were not sent already: */
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+	if (!nv_optimized(np))
 		nv_tx_done(dev);
 	else
 		nv_tx_done_optimized(dev, np->tx_ring_size);
@@ -2663,12 +2598,10 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 		netif_tx_lock_bh(dev);
 		spin_lock(&np->lock);
 		/* stop engines */
-		nv_stop_rx(dev);
-		nv_stop_tx(dev);
+		nv_stop_rxtx(dev);
 		nv_txrx_reset(dev);
 		/* drain rx queue */
-		nv_drain_rx(dev);
-		nv_drain_tx(dev);
+		nv_drain_rxtx(dev);
 		/* reinit driver view of the rx queue */
 		set_bufsize(dev);
 		if (nv_init_ring(dev)) {
@@ -2685,8 +2618,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 		pci_push(base);
 
 		/* restart rx engine */
-		nv_start_rx(dev);
-		nv_start_tx(dev);
+		nv_start_rxtx(dev);
 		spin_unlock(&np->lock);
 		netif_tx_unlock_bh(dev);
 		nv_enable_irq(dev);
@@ -3393,7 +3325,7 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
 	unsigned long flags;
 	int pkts, retcode;
 
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+	if (!nv_optimized(np)) {
 		pkts = nv_rx_process(dev, budget);
 		retcode = nv_alloc_rx(dev);
 	} else {
@@ -3634,7 +3566,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
 	if (intr_test) {
 		handler = nv_nic_irq_test;
 	} else {
-		if (np->desc_ver == DESC_VER_3)
+		if (nv_optimized(np))
 			handler = nv_nic_irq_optimized;
 		else
 			handler = nv_nic_irq;
@@ -3787,12 +3719,10 @@ static void nv_do_nic_poll(unsigned long data)
 			netif_tx_lock_bh(dev);
 			spin_lock(&np->lock);
 			/* stop engines */
-			nv_stop_rx(dev);
-			nv_stop_tx(dev);
+			nv_stop_rxtx(dev);
 			nv_txrx_reset(dev);
 			/* drain rx queue */
-			nv_drain_rx(dev);
-			nv_drain_tx(dev);
+			nv_drain_rxtx(dev);
 			/* reinit driver view of the rx queue */
 			set_bufsize(dev);
 			if (nv_init_ring(dev)) {
@@ -3809,8 +3739,7 @@ static void nv_do_nic_poll(unsigned long data)
 			pci_push(base);
 
 			/* restart rx engine */
-			nv_start_rx(dev);
-			nv_start_tx(dev);
+			nv_start_rxtx(dev);
 			spin_unlock(&np->lock);
 			netif_tx_unlock_bh(dev);
 		}
@@ -3821,7 +3750,7 @@ static void nv_do_nic_poll(unsigned long data)
 	pci_push(base);
 
 	if (!using_multi_irqs(dev)) {
-		if (np->desc_ver == DESC_VER_3)
+		if (nv_optimized(np))
 			nv_nic_irq_optimized(0, dev);
 		else
 			nv_nic_irq(0, dev);
@@ -3860,7 +3789,8 @@ static void nv_do_stats_poll(unsigned long data)
 	nv_get_hw_stats(dev);
 
 	if (!np->in_shutdown)
-		mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
+		mod_timer(&np->stats_poll,
+			round_jiffies(jiffies + STATS_INTERVAL));
 }
 
 static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
@@ -4018,8 +3948,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 		netif_tx_lock_bh(dev);
 		spin_lock(&np->lock);
 		/* stop engines */
-		nv_stop_rx(dev);
-		nv_stop_tx(dev);
+		nv_stop_rxtx(dev);
 		spin_unlock(&np->lock);
 		netif_tx_unlock_bh(dev);
 	}
@@ -4125,8 +4054,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 	}
 
 	if (netif_running(dev)) {
-		nv_start_rx(dev);
-		nv_start_tx(dev);
+		nv_start_rxtx(dev);
 		nv_enable_irq(dev);
 	}
 
@@ -4169,8 +4097,7 @@ static int nv_nway_reset(struct net_device *dev)
 			netif_tx_lock_bh(dev);
 			spin_lock(&np->lock);
 			/* stop engines */
-			nv_stop_rx(dev);
-			nv_stop_tx(dev);
+			nv_stop_rxtx(dev);
 			spin_unlock(&np->lock);
 			netif_tx_unlock_bh(dev);
 			printk(KERN_INFO "%s: link down.\n", dev->name);
@@ -4190,8 +4117,7 @@ static int nv_nway_reset(struct net_device *dev)
 		}
 
 		if (netif_running(dev)) {
-			nv_start_rx(dev);
-			nv_start_tx(dev);
+			nv_start_rxtx(dev);
 			nv_enable_irq(dev);
 		}
 		ret = 0;
@@ -4248,7 +4174,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
 	}
 
 	/* allocate new rings */
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+	if (!nv_optimized(np)) {
 		rxtx_ring = pci_alloc_consistent(np->pci_dev,
 					    sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
 					    &ring_addr);
@@ -4261,7 +4187,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
 	tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
 	if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
 		/* fall back to old rings */
-		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+		if (!nv_optimized(np)) {
 			if (rxtx_ring)
 				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
 						    rxtx_ring, ring_addr);
@@ -4282,12 +4208,10 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
 		netif_tx_lock_bh(dev);
 		spin_lock(&np->lock);
 		/* stop engines */
-		nv_stop_rx(dev);
-		nv_stop_tx(dev);
+		nv_stop_rxtx(dev);
 		nv_txrx_reset(dev);
 		/* drain queues */
-		nv_drain_rx(dev);
-		nv_drain_tx(dev);
+		nv_drain_rxtx(dev);
 		/* delete queues */
 		free_rings(dev);
 	}
@@ -4295,7 +4219,8 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
 	/* set new values */
 	np->rx_ring_size = ring->rx_pending;
 	np->tx_ring_size = ring->tx_pending;
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+
+	if (!nv_optimized(np)) {
 		np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
 		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
 	} else {
@@ -4327,8 +4252,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
 		pci_push(base);
 
 		/* restart engines */
-		nv_start_rx(dev);
-		nv_start_tx(dev);
+		nv_start_rxtx(dev);
 		spin_unlock(&np->lock);
 		netif_tx_unlock_bh(dev);
 		nv_enable_irq(dev);
@@ -4369,8 +4293,7 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
 		netif_tx_lock_bh(dev);
 		spin_lock(&np->lock);
 		/* stop engines */
-		nv_stop_rx(dev);
-		nv_stop_tx(dev);
+		nv_stop_rxtx(dev);
 		spin_unlock(&np->lock);
 		netif_tx_unlock_bh(dev);
 	}
@@ -4411,8 +4334,7 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
 	}
 
 	if (netif_running(dev)) {
-		nv_start_rx(dev);
-		nv_start_tx(dev);
+		nv_start_rxtx(dev);
 		nv_enable_irq(dev);
 	}
 	return 0;
@@ -4648,8 +4570,7 @@ static int nv_loopback_test(struct net_device *dev)
 	pci_push(base);
 
 	/* restart rx engine */
-	nv_start_rx(dev);
-	nv_start_tx(dev);
+	nv_start_rxtx(dev);
 
 	/* setup packet for tx */
 	pkt_len = ETH_DATA_LEN;
@@ -4667,7 +4588,7 @@ static int nv_loopback_test(struct net_device *dev)
 	for (i = 0; i < pkt_len; i++)
 		pkt_data[i] = (u8)(i & 0xff);
 
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+	if (!nv_optimized(np)) {
 		np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
 		np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
 	} else {
@@ -4681,7 +4602,7 @@ static int nv_loopback_test(struct net_device *dev)
 	msleep(500);
 
 	/* check for rx of the packet */
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+	if (!nv_optimized(np)) {
 		flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
 		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
 
@@ -4727,12 +4648,10 @@ static int nv_loopback_test(struct net_device *dev)
 	dev_kfree_skb_any(tx_skb);
  out:
 	/* stop engines */
-	nv_stop_rx(dev);
-	nv_stop_tx(dev);
+	nv_stop_rxtx(dev);
 	nv_txrx_reset(dev);
 	/* drain rx queue */
-	nv_drain_rx(dev);
-	nv_drain_tx(dev);
+	nv_drain_rxtx(dev);
 
 	if (netif_running(dev)) {
 		writel(misc1_flags, base + NvRegMisc1);
@@ -4770,12 +4689,10 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
 				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
 			}
 			/* stop engines */
-			nv_stop_rx(dev);
-			nv_stop_tx(dev);
+			nv_stop_rxtx(dev);
 			nv_txrx_reset(dev);
 			/* drain rx queue */
-			nv_drain_rx(dev);
-			nv_drain_tx(dev);
+			nv_drain_rxtx(dev);
 			spin_unlock_irq(&np->lock);
 			netif_tx_unlock_bh(dev);
 		}
@@ -4816,8 +4733,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
 			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
 			pci_push(base);
 			/* restart rx engine */
-			nv_start_rx(dev);
-			nv_start_tx(dev);
+			nv_start_rxtx(dev);
 			netif_start_queue(dev);
 #ifdef CONFIG_FORCEDETH_NAPI
 			napi_enable(&np->napi);
@@ -5046,8 +4962,7 @@ static int nv_open(struct net_device *dev)
 	 * to init hw */
 	np->linkspeed = 0;
 	ret = nv_update_linkspeed(dev);
-	nv_start_rx(dev);
-	nv_start_tx(dev);
+	nv_start_rxtx(dev);
 	netif_start_queue(dev);
 #ifdef CONFIG_FORCEDETH_NAPI
 	napi_enable(&np->napi);
@@ -5064,13 +4979,14 @@ static int nv_open(struct net_device *dev)
 
 	/* start statistics timer */
 	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2))
-		mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
+		mod_timer(&np->stats_poll,
+			round_jiffies(jiffies + STATS_INTERVAL));
 
 	spin_unlock_irq(&np->lock);
 
 	return 0;
 out_drain:
-	drain_ring(dev);
+	nv_drain_rxtx(dev);
 	return ret;
 }
 
@@ -5093,8 +5009,7 @@ static int nv_close(struct net_device *dev)
 
 	netif_stop_queue(dev);
 	spin_lock_irq(&np->lock);
-	nv_stop_tx(dev);
-	nv_stop_rx(dev);
+	nv_stop_rxtx(dev);
 	nv_txrx_reset(dev);
 
 	/* disable interrupts on the nic or we will lock up */
@@ -5107,7 +5022,7 @@ static int nv_close(struct net_device *dev)
 
 	nv_free_irq(dev);
 
-	drain_ring(dev);
+	nv_drain_rxtx(dev);
 
 	if (np->wolenabled) {
 		writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
@@ -5267,7 +5182,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	np->rx_ring_size = RX_RING_DEFAULT;
 	np->tx_ring_size = TX_RING_DEFAULT;
 
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+	if (!nv_optimized(np)) {
 		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
 					sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
 					&np->ring_addr);
@@ -5289,7 +5204,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 
 	dev->open = nv_open;
 	dev->stop = nv_close;
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+
+	if (!nv_optimized(np))
 		dev->hard_start_xmit = nv_start_xmit;
 	else
 		dev->hard_start_xmit = nv_start_xmit_optimized;
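
The forcedeth hunks above collapse the open-coded descriptor-version test and the paired rx/tx start, stop and drain calls into single helpers. A minimal sketch of what the nv_optimized() predicate presumably looks like, inferred only from the condition it replaces (struct fe_priv as the driver's private struct is an assumption here, and the body is not quoted from the commit):

static inline int nv_optimized(struct fe_priv *np)
{
	/* DESC_VER_1/2 use the original ring_desc layout */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return 0;
	/* anything newer uses the extended ("optimized") descriptors */
	return 1;
}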

+ 2 - 2
drivers/net/fs_enet/fs_enet-main.c

@@ -1178,7 +1178,7 @@ static int __devinit find_phy(struct device_node *np,
 
 	data  = of_get_property(np, "fixed-link", NULL);
 	if (data) {
-		snprintf(fpi->bus_id, 16, PHY_ID_FMT, 0, *data);
+		snprintf(fpi->bus_id, 16, "%x:%02x", 0, *data);
 		return 0;
 	}
 
@@ -1202,7 +1202,7 @@ static int __devinit find_phy(struct device_node *np,
 	if (!data || len != 4)
 		goto out_put_mdio;
 
-	snprintf(fpi->bus_id, 16, PHY_ID_FMT, res.start, *data);
+	snprintf(fpi->bus_id, 16, "%x:%02x", res.start, *data);
 
 out_put_mdio:
 	of_node_put(mdionode);

+ 2 - 2
drivers/net/fs_enet/mii-bitbang.c

@@ -130,7 +130,7 @@ static int __devinit fs_mii_bitbang_init(struct mii_bus *bus,
 	 * we get is an int, and the odds of multiple bitbang mdio buses
 	 * is low enough that it's not worth going too crazy.
 	 */
-	bus->id = res.start;
+	snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
 
 	data = of_get_property(np, "fsl,mdio-pin", &len);
 	if (!data || len != 4)
@@ -307,7 +307,7 @@ static int __devinit fs_enet_mdio_probe(struct device *dev)
 		return -ENOMEM;
 
 	new_bus->name = "BB MII Bus",
-	new_bus->id = pdev->id;
+	snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
 
 	new_bus->phy_mask = ~0x9;
 	pdata = (struct fs_mii_bb_platform_info *)pdev->dev.platform_data;
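
These fs_enet MDIO hunks (and the mii-fec and gianfar_mii ones that follow) share one pattern: the mii_bus identifier is no longer a plain integer but a fixed-size string, so bus and PHY ids are now composed with snprintf(). A hedged sketch of that pattern; only MII_BUS_ID_SIZE, the 16-byte fpi buffer and the format strings come from the hunks, the helper itself is illustrative:

static void example_mdio_ids(struct mii_bus *bus, char fpi_bus_id[16],
			     u32 res_start, u32 phy_addr)
{
	/* bus id: hex bus address, bounded by MII_BUS_ID_SIZE */
	snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res_start);
	/* phy id: "<bus>:<addr>", the "%x:%02x" form used for fpi->bus_id */
	snprintf(fpi_bus_id, 16, "%x:%02x", res_start, phy_addr);
}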

+ 2 - 2
drivers/net/fs_enet/mii-fec.c

@@ -196,7 +196,7 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
 	if (ret)
 		return ret;
 
-	new_bus->id = res.start;
+	snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start);
 
 	fec->fecp = ioremap(res.start, res.end - res.start + 1);
 	if (!fec->fecp)
@@ -309,7 +309,7 @@ static int __devinit fs_enet_fec_mdio_probe(struct device *dev)
 	new_bus->read = &fs_enet_fec_mii_read,
 	new_bus->write = &fs_enet_fec_mii_write,
 	new_bus->reset = &fs_enet_fec_mii_reset,
-	new_bus->id = pdev->id;
+	snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
 
 	pdata = (struct fs_mii_fec_platform_info *)pdev->dev.platform_data;
 

+ 53 - 27
drivers/net/gianfar.c

@@ -1185,7 +1185,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
 	int frame_size = new_mtu + ETH_HLEN;
 
 	if (priv->vlan_enable)
-		frame_size += VLAN_ETH_HLEN;
+		frame_size += VLAN_HLEN;
 
 	if (gfar_uses_fcb(priv))
 		frame_size += GMAC_FCB_LEN;
@@ -1250,17 +1250,12 @@ static void gfar_timeout(struct net_device *dev)
 }
 
 /* Interrupt Handler for Transmit complete */
-static irqreturn_t gfar_transmit(int irq, void *dev_id)
+int gfar_clean_tx_ring(struct net_device *dev)
 {
-	struct net_device *dev = (struct net_device *) dev_id;
-	struct gfar_private *priv = netdev_priv(dev);
 	struct txbd8 *bdp;
+	struct gfar_private *priv = netdev_priv(dev);
+	int howmany = 0;
 
-	/* Clear IEVENT */
-	gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);
-
-	/* Lock priv */
-	spin_lock(&priv->txlock);
 	bdp = priv->dirty_tx;
 	while ((bdp->status & TXBD_READY) == 0) {
 		/* If dirty_tx and cur_tx are the same, then either the */
@@ -1269,7 +1264,7 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id)
 		if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
 			break;
 
-		dev->stats.tx_packets++;
+		howmany++;
 
 		/* Deferred means some collisions occurred during transmit, */
 		/* but we eventually sent the packet. */
@@ -1278,11 +1273,15 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id)
 
 		/* Free the sk buffer associated with this TxBD */
 		dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
+
 		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
 		priv->skb_dirtytx =
 		    (priv->skb_dirtytx +
 		     1) & TX_RING_MOD_MASK(priv->tx_ring_size);
 
+		/* Clean BD length for empty detection */
+		bdp->length = 0;
+
 		/* update bdp to point at next bd in the ring (wrapping if necessary) */
 		if (bdp->status & TXBD_WRAP)
 			bdp = priv->tx_bd_base;
@@ -1297,13 +1296,32 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id)
 			netif_wake_queue(dev);
 	} /* while ((bdp->status & TXBD_READY) == 0) */
 
+	dev->stats.tx_packets += howmany;
+
+	return howmany;
+}
+
+/* Interrupt Handler for Transmit complete */
+static irqreturn_t gfar_transmit(int irq, void *dev_id)
+{
+	struct net_device *dev = (struct net_device *) dev_id;
+	struct gfar_private *priv = netdev_priv(dev);
+
+	/* Clear IEVENT */
+	gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);
+
+	/* Lock priv */
+	spin_lock(&priv->txlock);
+
+	gfar_clean_tx_ring(dev);
+
 	/* If we are coalescing the interrupts, reset the timer */
 	/* Otherwise, clear it */
-	if (priv->txcoalescing)
+	if (likely(priv->txcoalescing)) {
+		gfar_write(&priv->regs->txic, 0);
 		gfar_write(&priv->regs->txic,
 			   mk_ic_value(priv->txcount, priv->txtime));
-	else
-		gfar_write(&priv->regs->txic, 0);
+	}
 
 	spin_unlock(&priv->txlock);
 
@@ -1392,15 +1410,15 @@ irqreturn_t gfar_receive(int irq, void *dev_id)
 	unsigned long flags;
 #endif
 
-	/* Clear IEVENT, so rx interrupt isn't called again
-	 * because of this interrupt */
-	gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
-
 	/* support NAPI */
 #ifdef CONFIG_GFAR_NAPI
+	/* Clear IEVENT, so interrupts aren't called again
+	 * because of the packets that have already arrived */
+	gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
+
 	if (netif_rx_schedule_prep(dev, &priv->napi)) {
 		tempval = gfar_read(&priv->regs->imask);
-		tempval &= IMASK_RX_DISABLED;
+		tempval &= IMASK_RTX_DISABLED;
 		gfar_write(&priv->regs->imask, tempval);
 
 		__netif_rx_schedule(dev, &priv->napi);
@@ -1411,17 +1429,20 @@ irqreturn_t gfar_receive(int irq, void *dev_id)
 				gfar_read(&priv->regs->imask));
 	}
 #else
+	/* Clear IEVENT, so rx interrupt isn't called again
+	 * because of this interrupt */
+	gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
 
 	spin_lock_irqsave(&priv->rxlock, flags);
 	gfar_clean_rx_ring(dev, priv->rx_ring_size);
 
 	/* If we are coalescing interrupts, update the timer */
 	/* Otherwise, clear it */
-	if (priv->rxcoalescing)
+	if (likely(priv->rxcoalescing)) {
+		gfar_write(&priv->regs->rxic, 0);
 		gfar_write(&priv->regs->rxic,
 			   mk_ic_value(priv->rxcount, priv->rxtime));
-	else
-		gfar_write(&priv->regs->rxic, 0);
+	}
 
 	spin_unlock_irqrestore(&priv->rxlock, flags);
 #endif
@@ -1526,9 +1547,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 		rmb();
 		skb = priv->rx_skbuff[priv->skb_currx];
 
-		if (!(bdp->status &
-		      (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET
-		       | RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))) {
+		if ((bdp->status & RXBD_LAST) && !(bdp->status & RXBD_ERR)) {
 			/* Increment the number of packets */
 			dev->stats.rx_packets++;
 			howmany++;
@@ -1582,6 +1601,13 @@ static int gfar_poll(struct napi_struct *napi, int budget)
 	struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
 	struct net_device *dev = priv->dev;
 	int howmany;
+	unsigned long flags;
+
+	/* If we fail to get the lock, don't bother with the TX BDs */
+	if (spin_trylock_irqsave(&priv->txlock, flags)) {
+		gfar_clean_tx_ring(dev);
+		spin_unlock_irqrestore(&priv->txlock, flags);
+	}
 
 	howmany = gfar_clean_rx_ring(dev, budget);
 
@@ -1595,11 +1621,11 @@ static int gfar_poll(struct napi_struct *napi, int budget)
 
 		/* If we are coalescing interrupts, update the timer */
 		/* Otherwise, clear it */
-		if (priv->rxcoalescing)
+		if (likely(priv->rxcoalescing)) {
+			gfar_write(&priv->regs->rxic, 0);
 			gfar_write(&priv->regs->rxic,
 				   mk_ic_value(priv->rxcount, priv->rxtime));
-		else
-			gfar_write(&priv->regs->rxic, 0);
+		}
 	}
 
 	return howmany;
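
A recurring idiom in the gianfar hunks above is writing 0 to the coalescing register before re-writing the threshold/time value; the reading here is that the zero write disarms the coalescing timer so the new value restarts the countdown cleanly, which is an interpretation of the code rather than text from the commit. The idiom in isolation:

static inline void example_restart_rxic(struct gfar_private *priv)
{
	/* disarm, then re-arm with the configured count/time pair */
	gfar_write(&priv->regs->rxic, 0);
	gfar_write(&priv->regs->rxic,
		   mk_ic_value(priv->rxcount, priv->rxtime));
}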

+ 16 - 4
drivers/net/gianfar.h

@@ -102,7 +102,7 @@ extern const char gfar_driver_version[];
 #define DEFAULT_FIFO_TX_STARVE 0x40
 #define DEFAULT_FIFO_TX_STARVE_OFF 0x80
 #define DEFAULT_BD_STASH 1
-#define DEFAULT_STASH_LENGTH	64
+#define DEFAULT_STASH_LENGTH	96
 #define DEFAULT_STASH_INDEX	0
 
 /* The number of Exact Match registers */
@@ -124,11 +124,18 @@ extern const char gfar_driver_version[];
 
 #define DEFAULT_TX_COALESCE 1
 #define DEFAULT_TXCOUNT	16
-#define DEFAULT_TXTIME	4
+#define DEFAULT_TXTIME	21
 
+#define DEFAULT_RXTIME	21
+
+/* Non NAPI Case */
+#ifndef CONFIG_GFAR_NAPI
 #define DEFAULT_RX_COALESCE 1
 #define DEFAULT_RXCOUNT	16
-#define DEFAULT_RXTIME	4
+#else
+#define DEFAULT_RX_COALESCE 0
+#define DEFAULT_RXCOUNT	0
+#endif /* CONFIG_GFAR_NAPI */
 
 #define TBIPA_VALUE		0x1f
 #define MIIMCFG_INIT_VALUE	0x00000007
@@ -242,6 +249,7 @@ extern const char gfar_driver_version[];
 #define IEVENT_PERR		0x00000001
 #define IEVENT_RX_MASK          (IEVENT_RXB0 | IEVENT_RXF0)
 #define IEVENT_TX_MASK          (IEVENT_TXB | IEVENT_TXF)
+#define IEVENT_RTX_MASK         (IEVENT_RX_MASK | IEVENT_TX_MASK)
 #define IEVENT_ERR_MASK         \
 (IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \
  IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \
@@ -269,11 +277,12 @@ extern const char gfar_driver_version[];
 #define IMASK_FIQ		0x00000004
 #define IMASK_DPE		0x00000002
 #define IMASK_PERR		0x00000001
-#define IMASK_RX_DISABLED ~(IMASK_RXFEN0 | IMASK_BSY)
 #define IMASK_DEFAULT  (IMASK_TXEEN | IMASK_TXFEN | IMASK_TXBEN | \
 		IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \
 		IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \
 		| IMASK_PERR)
+#define IMASK_RTX_DISABLED ((~(IMASK_RXFEN0 | IMASK_TXFEN | IMASK_BSY)) \
+			   & IMASK_DEFAULT)
 
 /* Fifo management */
 #define FIFO_TX_THR_MASK	0x01ff
@@ -340,6 +349,9 @@ extern const char gfar_driver_version[];
 #define RXBD_OVERRUN		0x0002
 #define RXBD_TRUNCATED		0x0001
 #define RXBD_STATS		0x01ff
+#define RXBD_ERR		(RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET 	\
+				| RXBD_CRCERR | RXBD_OVERRUN			\
+				| RXBD_TRUNCATED)
 
 /* Rx FCB status field bits */
 #define RXFCB_VLN		0x8000
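
The new RXBD_ERR mask folds the six receive-error bits into one constant, which lets the RX clean loop test "complete frame, no errors" in a single expression. An equivalent restatement of that check as a standalone helper (the helper name is invented here):

static inline int example_rxbd_ok(u16 status)
{
	/* accept only last-fragment descriptors with no RXBD_ERR bits set */
	return (status & RXBD_LAST) && !(status & RXBD_ERR);
}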

+ 1 - 1
drivers/net/gianfar_mii.c

@@ -173,7 +173,7 @@ int gfar_mdio_probe(struct device *dev)
 	new_bus->read = &gfar_mdio_read,
 	new_bus->write = &gfar_mdio_write,
 	new_bus->reset = &gfar_mdio_reset,
-	new_bus->id = pdev->id;
+	snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
 
 	pdata = (struct gianfar_mdio_data *)pdev->dev.platform_data;
 

+ 2 - 2
drivers/net/hamradio/bpqether.c

@@ -172,7 +172,7 @@ static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty
 	struct ethhdr *eth;
 	struct bpqdev *bpq;
 
-	if (dev->nd_net != &init_net)
+	if (dev_net(dev) != &init_net)
 		goto drop;
 
 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
@@ -553,7 +553,7 @@ static int bpq_device_event(struct notifier_block *this,unsigned long event, voi
 {
 	struct net_device *dev = (struct net_device *)ptr;
 
-	if (dev->nd_net != &init_net)
+	if (dev_net(dev) != &init_net)
 		return NOTIFY_DONE;
 
 	if (!dev_is_ethdev(dev))
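
The bpqether change swaps direct dev->nd_net access for the dev_net() accessor, which keeps the namespace check compilable whether or not network namespaces are configured in. A minimal sketch of the check (helper name invented here):

static inline int example_in_init_net(const struct net_device *dev)
{
	return dev_net(dev) == &init_net;
}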

+ 3 - 39
drivers/net/ibmveth.c

@@ -1259,26 +1259,7 @@ static void ibmveth_proc_unregister_driver(void)
 	remove_proc_entry(IBMVETH_PROC_DIR, init_net.proc_net);
 }
 
-static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos)
-{
-	if (*pos == 0) {
-		return (void *)1;
-	} else {
-		return NULL;
-	}
-}
-
-static void *ibmveth_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-	++*pos;
-	return NULL;
-}
-
-static void ibmveth_seq_stop(struct seq_file *seq, void *v)
-{
-}
-
-static int ibmveth_seq_show(struct seq_file *seq, void *v)
+static int ibmveth_show(struct seq_file *seq, void *v)
 {
 	struct ibmveth_adapter *adapter = seq->private;
 	char *current_mac = ((char*) &adapter->netdev->dev_addr);
@@ -1302,27 +1283,10 @@ static int ibmveth_seq_show(struct seq_file *seq, void *v)
 
 	return 0;
 }
-static struct seq_operations ibmveth_seq_ops = {
-	.start = ibmveth_seq_start,
-	.next  = ibmveth_seq_next,
-	.stop  = ibmveth_seq_stop,
-	.show  = ibmveth_seq_show,
-};
 
 static int ibmveth_proc_open(struct inode *inode, struct file *file)
 {
-	struct seq_file *seq;
-	struct proc_dir_entry *proc;
-	int rc;
-
-	rc = seq_open(file, &ibmveth_seq_ops);
-	if (!rc) {
-		/* recover the pointer buried in proc_dir_entry data */
-		seq = file->private_data;
-		proc = PDE(inode);
-		seq->private = proc->data;
-	}
-	return rc;
+	return single_open(file, ibmveth_show, PDE(inode)->data);
 }
 
 static const struct file_operations ibmveth_proc_fops = {
@@ -1330,7 +1294,7 @@ static const struct file_operations ibmveth_proc_fops = {
 	.open    = ibmveth_proc_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
-	.release = seq_release,
+	.release = single_release,
 };
 
 static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
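
The ibmveth hunk replaces a hand-rolled seq_operations (start/next/stop/show) with single_open(), the stock helper for a /proc file that emits exactly one record, paired with single_release(). A generic, hedged illustration of the pattern, not ibmveth code (assumes <linux/seq_file.h> and <linux/proc_fs.h>):

static int example_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "one-shot record\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	/* hand the proc entry's private data straight to the show routine */
	return single_open(file, example_show, PDE(inode)->data);
}

static const struct file_operations example_fops = {
	.owner   = THIS_MODULE,
	.open    = example_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};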

+ 39 - 21
drivers/net/ixgb/ixgb.h

@@ -117,8 +117,8 @@ struct ixgb_buffer {
 	struct sk_buff *skb;
 	dma_addr_t dma;
 	unsigned long time_stamp;
-	uint16_t length;
-	uint16_t next_to_watch;
+	u16 length;
+	u16 next_to_watch;
 };
 
 struct ixgb_desc_ring {
@@ -152,13 +152,12 @@ struct ixgb_desc_ring {
 struct ixgb_adapter {
 	struct timer_list watchdog_timer;
 	struct vlan_group *vlgrp;
-	uint32_t bd_number;
-	uint32_t rx_buffer_len;
-	uint32_t part_num;
-	uint16_t link_speed;
-	uint16_t link_duplex;
+	u32 bd_number;
+	u32 rx_buffer_len;
+	u32 part_num;
+	u16 link_speed;
+	u16 link_duplex;
 	spinlock_t tx_lock;
-	atomic_t irq_sem;
 	struct work_struct tx_timeout_task;
 
 	struct timer_list blink_timer;
@@ -168,20 +167,20 @@ struct ixgb_adapter {
 	struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp;
 	unsigned int restart_queue;
 	unsigned long timeo_start;
-	uint32_t tx_cmd_type;
-	uint64_t hw_csum_tx_good;
-	uint64_t hw_csum_tx_error;
-	uint32_t tx_int_delay;
-	uint32_t tx_timeout_count;
-	boolean_t tx_int_delay_enable;
-	boolean_t detect_tx_hung;
+	u32 tx_cmd_type;
+	u64 hw_csum_tx_good;
+	u64 hw_csum_tx_error;
+	u32 tx_int_delay;
+	u32 tx_timeout_count;
+	bool tx_int_delay_enable;
+	bool detect_tx_hung;
 
 	/* RX */
 	struct ixgb_desc_ring rx_ring;
-	uint64_t hw_csum_rx_error;
-	uint64_t hw_csum_rx_good;
-	uint32_t rx_int_delay;
-	boolean_t rx_csum;
+	u64 hw_csum_rx_error;
+	u64 hw_csum_rx_good;
+	u32 rx_int_delay;
+	bool rx_csum;
 
 	/* OS defined structs */
 	struct napi_struct napi;
@@ -193,8 +192,17 @@ struct ixgb_adapter {
 	struct ixgb_hw hw;
 	u16 msg_enable;
 	struct ixgb_hw_stats stats;
-	uint32_t alloc_rx_buff_failed;
-	boolean_t have_msi;
+	u32 alloc_rx_buff_failed;
+	bool have_msi;
+	unsigned long flags;
+};
+
+enum ixgb_state_t {
+	/* TBD
+	__IXGB_TESTING,
+	__IXGB_RESETTING,
+	*/
+	__IXGB_DOWN
 };
 
 /* Exported from other modules */
@@ -203,4 +211,14 @@ extern void ixgb_set_ethtool_ops(struct net_device *netdev);
 extern char ixgb_driver_name[];
 extern const char ixgb_driver_version[];
 
+extern int ixgb_up(struct ixgb_adapter *adapter);
+extern void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
+extern void ixgb_reset(struct ixgb_adapter *adapter);
+extern int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
+extern int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
+extern void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
+extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
+extern void ixgb_update_stats(struct ixgb_adapter *adapter);
+
+
 #endif /* _IXGB_H_ */
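
The new flags word and __IXGB_DOWN bit in ixgb_adapter suggest the usual atomic-bitops state tracking; how the driver actually uses the bit is not shown in this hunk, so the snippet below is purely a hypothetical usage sketch:

static void example_mark_down(struct ixgb_adapter *adapter)
{
	set_bit(__IXGB_DOWN, &adapter->flags);
}

static bool example_is_down(struct ixgb_adapter *adapter)
{
	return test_bit(__IXGB_DOWN, &adapter->flags);
}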

+ 62 - 62
drivers/net/ixgb/ixgb_ee.c

@@ -29,14 +29,14 @@
 #include "ixgb_hw.h"
 #include "ixgb_ee.h"
 /* Local prototypes */
-static uint16_t ixgb_shift_in_bits(struct ixgb_hw *hw);
+static u16 ixgb_shift_in_bits(struct ixgb_hw *hw);
 
 static void ixgb_shift_out_bits(struct ixgb_hw *hw,
-				uint16_t data,
-				uint16_t count);
+				u16 data,
+				u16 count);
 static void ixgb_standby_eeprom(struct ixgb_hw *hw);
 
-static boolean_t ixgb_wait_eeprom_command(struct ixgb_hw *hw);
+static bool ixgb_wait_eeprom_command(struct ixgb_hw *hw);
 
 static void ixgb_cleanup_eeprom(struct ixgb_hw *hw);
 
@@ -48,7 +48,7 @@ static void ixgb_cleanup_eeprom(struct ixgb_hw *hw);
  *****************************************************************************/
 static void
 ixgb_raise_clock(struct ixgb_hw *hw,
-		  uint32_t *eecd_reg)
+		  u32 *eecd_reg)
 {
 	/* Raise the clock input to the EEPROM (by setting the SK bit), and then
 	 *  wait 50 microseconds.
@@ -67,7 +67,7 @@ ixgb_raise_clock(struct ixgb_hw *hw,
  *****************************************************************************/
 static void
 ixgb_lower_clock(struct ixgb_hw *hw,
-		  uint32_t *eecd_reg)
+		  u32 *eecd_reg)
 {
 	/* Lower the clock input to the EEPROM (by clearing the SK bit), and then
 	 * wait 50 microseconds.
@@ -87,11 +87,11 @@ ixgb_lower_clock(struct ixgb_hw *hw,
  *****************************************************************************/
 static void
 ixgb_shift_out_bits(struct ixgb_hw *hw,
-					 uint16_t data,
-					 uint16_t count)
+					 u16 data,
+					 u16 count)
 {
-	uint32_t eecd_reg;
-	uint32_t mask;
+	u32 eecd_reg;
+	u32 mask;
 
 	/* We need to shift "count" bits out to the EEPROM. So, value in the
 	 * "data" parameter will be shifted out to the EEPROM one bit at a time.
@@ -133,12 +133,12 @@ ixgb_shift_out_bits(struct ixgb_hw *hw,
  *
  * hw - Struct containing variables accessed by shared code
  *****************************************************************************/
-static uint16_t
+static u16
 ixgb_shift_in_bits(struct ixgb_hw *hw)
 {
-	uint32_t eecd_reg;
-	uint32_t i;
-	uint16_t data;
+	u32 eecd_reg;
+	u32 i;
+	u16 data;
 
 	/* In order to read a register from the EEPROM, we need to shift 16 bits
 	 * in from the EEPROM. Bits are "shifted in" by raising the clock input to
@@ -179,7 +179,7 @@ ixgb_shift_in_bits(struct ixgb_hw *hw)
 static void
 ixgb_setup_eeprom(struct ixgb_hw *hw)
 {
-	uint32_t eecd_reg;
+	u32 eecd_reg;
 
 	eecd_reg = IXGB_READ_REG(hw, EECD);
 
@@ -201,7 +201,7 @@ ixgb_setup_eeprom(struct ixgb_hw *hw)
 static void
 ixgb_standby_eeprom(struct ixgb_hw *hw)
 {
-	uint32_t eecd_reg;
+	u32 eecd_reg;
 
 	eecd_reg = IXGB_READ_REG(hw, EECD);
 
@@ -235,7 +235,7 @@ ixgb_standby_eeprom(struct ixgb_hw *hw)
 static void
 ixgb_clock_eeprom(struct ixgb_hw *hw)
 {
-	uint32_t eecd_reg;
+	u32 eecd_reg;
 
 	eecd_reg = IXGB_READ_REG(hw, EECD);
 
@@ -259,7 +259,7 @@ ixgb_clock_eeprom(struct ixgb_hw *hw)
 static void
 ixgb_cleanup_eeprom(struct ixgb_hw *hw)
 {
-	uint32_t eecd_reg;
+	u32 eecd_reg;
 
 	eecd_reg = IXGB_READ_REG(hw, EECD);
 
@@ -279,14 +279,14 @@ ixgb_cleanup_eeprom(struct ixgb_hw *hw)
  * The command is done when the EEPROM's data out pin goes high.
  *
  * Returns:
- *      TRUE: EEPROM data pin is high before timeout.
- *      FALSE:  Time expired.
+ *      true: EEPROM data pin is high before timeout.
+ *      false:  Time expired.
  *****************************************************************************/
-static boolean_t
+static bool
 ixgb_wait_eeprom_command(struct ixgb_hw *hw)
 {
-	uint32_t eecd_reg;
-	uint32_t i;
+	u32 eecd_reg;
+	u32 i;
 
 	/* Toggle the CS line.  This in effect tells to EEPROM to actually execute
 	 * the command in question.
@@ -301,12 +301,12 @@ ixgb_wait_eeprom_command(struct ixgb_hw *hw)
 		eecd_reg = IXGB_READ_REG(hw, EECD);
 
 		if(eecd_reg & IXGB_EECD_DO)
-			return (TRUE);
+			return (true);
 
 		udelay(50);
 	}
 	ASSERT(0);
-	return (FALSE);
+	return (false);
 }
 
 /******************************************************************************
@@ -319,22 +319,22 @@ ixgb_wait_eeprom_command(struct ixgb_hw *hw)
  * valid.
  *
  * Returns:
- *  TRUE: Checksum is valid
- *  FALSE: Checksum is not valid.
+ *  true: Checksum is valid
+ *  false: Checksum is not valid.
  *****************************************************************************/
-boolean_t
+bool
 ixgb_validate_eeprom_checksum(struct ixgb_hw *hw)
 {
-	uint16_t checksum = 0;
-	uint16_t i;
+	u16 checksum = 0;
+	u16 i;
 
 	for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++)
 		checksum += ixgb_read_eeprom(hw, i);
 
-	if(checksum == (uint16_t) EEPROM_SUM)
-		return (TRUE);
+	if(checksum == (u16) EEPROM_SUM)
+		return (true);
 	else
-		return (FALSE);
+		return (false);
 }
 
 /******************************************************************************
@@ -348,13 +348,13 @@ ixgb_validate_eeprom_checksum(struct ixgb_hw *hw)
 void
 ixgb_update_eeprom_checksum(struct ixgb_hw *hw)
 {
-	uint16_t checksum = 0;
-	uint16_t i;
+	u16 checksum = 0;
+	u16 i;
 
 	for(i = 0; i < EEPROM_CHECKSUM_REG; i++)
 		checksum += ixgb_read_eeprom(hw, i);
 
-	checksum = (uint16_t) EEPROM_SUM - checksum;
+	checksum = (u16) EEPROM_SUM - checksum;
 
 	ixgb_write_eeprom(hw, EEPROM_CHECKSUM_REG, checksum);
 	return;
@@ -372,7 +372,7 @@ ixgb_update_eeprom_checksum(struct ixgb_hw *hw)
  *
  *****************************************************************************/
 void
-ixgb_write_eeprom(struct ixgb_hw *hw, uint16_t offset, uint16_t data)
+ixgb_write_eeprom(struct ixgb_hw *hw, u16 offset, u16 data)
 {
 	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
 
@@ -425,11 +425,11 @@ ixgb_write_eeprom(struct ixgb_hw *hw, uint16_t offset, uint16_t data)
  * Returns:
  *  The 16-bit value read from the eeprom
  *****************************************************************************/
-uint16_t
+u16
 ixgb_read_eeprom(struct ixgb_hw *hw,
-		  uint16_t offset)
+		  u16 offset)
 {
-	uint16_t data;
+	u16 data;
 
 	/*  Prepare the EEPROM for reading  */
 	ixgb_setup_eeprom(hw);
@@ -457,14 +457,14 @@ ixgb_read_eeprom(struct ixgb_hw *hw,
  * hw - Struct containing variables accessed by shared code
  *
  * Returns:
- *      TRUE: if eeprom read is successful
- *      FALSE: otherwise.
+ *      true: if eeprom read is successful
+ *      false: otherwise.
  *****************************************************************************/
-boolean_t
+bool
 ixgb_get_eeprom_data(struct ixgb_hw *hw)
 {
-	uint16_t i;
-	uint16_t checksum = 0;
+	u16 i;
+	u16 checksum = 0;
 	struct ixgb_ee_map_type *ee_map;
 
 	DEBUGFUNC("ixgb_get_eeprom_data");
@@ -473,27 +473,27 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
 
 	DEBUGOUT("ixgb_ee: Reading eeprom data\n");
 	for(i = 0; i < IXGB_EEPROM_SIZE ; i++) {
-		uint16_t ee_data;
+		u16 ee_data;
 		ee_data = ixgb_read_eeprom(hw, i);
 		checksum += ee_data;
 		hw->eeprom[i] = cpu_to_le16(ee_data);
 	}
 
-	if (checksum != (uint16_t) EEPROM_SUM) {
+	if (checksum != (u16) EEPROM_SUM) {
 		DEBUGOUT("ixgb_ee: Checksum invalid.\n");
 		/* clear the init_ctrl_reg_1 to signify that the cache is
 		 * invalidated */
 		ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR);
-		return (FALSE);
+		return (false);
 	}
 
 	if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
 		 != cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
 		DEBUGOUT("ixgb_ee: Signature invalid.\n");
-		return(FALSE);
+		return(false);
 	}
 
-	return(TRUE);
+	return(true);
 }
 
 /******************************************************************************
@@ -503,17 +503,17 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
  * hw - Struct containing variables accessed by shared code
  *
  * Returns:
- *      TRUE: eeprom signature was good and the eeprom read was successful
- *      FALSE: otherwise.
+ *      true: eeprom signature was good and the eeprom read was successful
+ *      false: otherwise.
  ******************************************************************************/
-static boolean_t
+static bool
 ixgb_check_and_get_eeprom_data (struct ixgb_hw* hw)
 {
 	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
 
 	if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
 	    == cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
-		return (TRUE);
+		return (true);
 	} else {
 		return ixgb_get_eeprom_data(hw);
 	}
@@ -529,11 +529,11 @@ ixgb_check_and_get_eeprom_data (struct ixgb_hw* hw)
  *          Word at indexed offset in eeprom, if valid, 0 otherwise.
  ******************************************************************************/
 __le16
-ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index)
+ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index)
 {
 
 	if ((index < IXGB_EEPROM_SIZE) &&
-		(ixgb_check_and_get_eeprom_data(hw) == TRUE)) {
+		(ixgb_check_and_get_eeprom_data(hw) == true)) {
 	   return(hw->eeprom[index]);
 	}
 
@@ -550,14 +550,14 @@ ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index)
  ******************************************************************************/
 void
 ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
-			uint8_t *mac_addr)
+			u8 *mac_addr)
 {
 	int i;
 	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
 
 	DEBUGFUNC("ixgb_get_ee_mac_addr");
 
-	if (ixgb_check_and_get_eeprom_data(hw) == TRUE) {
+	if (ixgb_check_and_get_eeprom_data(hw) == true) {
 		for (i = 0; i < IXGB_ETH_LENGTH_OF_ADDRESS; i++) {
 			mac_addr[i] = ee_map->mac_addr[i];
 			DEBUGOUT2("mac(%d) = %.2X\n", i, mac_addr[i]);
@@ -574,10 +574,10 @@ ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
  * Returns:
  *          PBA number if EEPROM contents are valid, 0 otherwise
  ******************************************************************************/
-uint32_t
+u32
 ixgb_get_ee_pba_number(struct ixgb_hw *hw)
 {
-	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
+	if (ixgb_check_and_get_eeprom_data(hw) == true)
 		return (le16_to_cpu(hw->eeprom[EEPROM_PBA_1_2_REG])
 			| (le16_to_cpu(hw->eeprom[EEPROM_PBA_3_4_REG])<<16));
 
@@ -593,12 +593,12 @@ ixgb_get_ee_pba_number(struct ixgb_hw *hw)
  * Returns:
  *          Device Id if EEPROM contents are valid, 0 otherwise
  ******************************************************************************/
-uint16_t
+u16
 ixgb_get_ee_device_id(struct ixgb_hw *hw)
 {
 	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
 
-	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
+	if (ixgb_check_and_get_eeprom_data(hw) == true)
 		return (le16_to_cpu(ee_map->device_id));
 
 	return (0);

+ 6 - 6
drivers/net/ixgb/ixgb_ee.h

@@ -75,7 +75,7 @@
 
 /* EEPROM structure */
 struct ixgb_ee_map_type {
-	uint8_t mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS];
+	u8 mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS];
 	__le16 compatibility;
 	__le16 reserved1[4];
 	__le32 pba_number;
@@ -88,19 +88,19 @@ struct ixgb_ee_map_type {
 	__le16 oem_reserved[16];
 	__le16 swdpins_reg;
 	__le16 circuit_ctrl_reg;
-	uint8_t d3_power;
-	uint8_t d0_power;
+	u8 d3_power;
+	u8 d0_power;
 	__le16 reserved2[28];
 	__le16 checksum;
 };
 
 /* EEPROM Functions */
-uint16_t ixgb_read_eeprom(struct ixgb_hw *hw, uint16_t reg);
+u16 ixgb_read_eeprom(struct ixgb_hw *hw, u16 reg);
 
-boolean_t ixgb_validate_eeprom_checksum(struct ixgb_hw *hw);
+bool ixgb_validate_eeprom_checksum(struct ixgb_hw *hw);
 
 void ixgb_update_eeprom_checksum(struct ixgb_hw *hw);
 
-void ixgb_write_eeprom(struct ixgb_hw *hw, uint16_t reg, uint16_t data);
+void ixgb_write_eeprom(struct ixgb_hw *hw, u16 reg, u16 data);
 
 #endif				/* IXGB_EE_H */

+ 29 - 38
drivers/net/ixgb/ixgb_ethtool.c

@@ -32,15 +32,6 @@
 
 #include <asm/uaccess.h>
 
-extern int ixgb_up(struct ixgb_adapter *adapter);
-extern void ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog);
-extern void ixgb_reset(struct ixgb_adapter *adapter);
-extern int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
-extern int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
-extern void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
-extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
-extern void ixgb_update_stats(struct ixgb_adapter *adapter);
-
 #define IXGB_ALL_RAR_ENTRIES 16
 
 struct ixgb_stats {
@@ -136,7 +127,7 @@ ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 		return -EINVAL;
 	
 	if(netif_running(adapter->netdev)) {
-		ixgb_down(adapter, TRUE);
+		ixgb_down(adapter, true);
 		ixgb_reset(adapter);
 		ixgb_up(adapter);
 		ixgb_set_speed_duplex(netdev);
@@ -185,7 +176,7 @@ ixgb_set_pauseparam(struct net_device *netdev,
 		hw->fc.type = ixgb_fc_none;
 
 	if(netif_running(adapter->netdev)) {
-		ixgb_down(adapter, TRUE);
+		ixgb_down(adapter, true);
 		ixgb_up(adapter);
 		ixgb_set_speed_duplex(netdev);
 	} else
@@ -194,7 +185,7 @@ ixgb_set_pauseparam(struct net_device *netdev,
 	return 0;
 }
 
-static uint32_t
+static u32
 ixgb_get_rx_csum(struct net_device *netdev)
 {
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
@@ -203,14 +194,14 @@ ixgb_get_rx_csum(struct net_device *netdev)
 }
 
 static int
-ixgb_set_rx_csum(struct net_device *netdev, uint32_t data)
+ixgb_set_rx_csum(struct net_device *netdev, u32 data)
 {
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
 
 	adapter->rx_csum = data;
 
 	if(netif_running(netdev)) {
-		ixgb_down(adapter,TRUE);
+		ixgb_down(adapter, true);
 		ixgb_up(adapter);
 		ixgb_set_speed_duplex(netdev);
 	} else
@@ -218,14 +209,14 @@ ixgb_set_rx_csum(struct net_device *netdev, uint32_t data)
 	return 0;
 }
 	
-static uint32_t
+static u32
 ixgb_get_tx_csum(struct net_device *netdev)
 {
 	return (netdev->features & NETIF_F_HW_CSUM) != 0;
 }
 
 static int
-ixgb_set_tx_csum(struct net_device *netdev, uint32_t data)
+ixgb_set_tx_csum(struct net_device *netdev, u32 data)
 {
 	if (data)
 		netdev->features |= NETIF_F_HW_CSUM;
@@ -236,7 +227,7 @@ ixgb_set_tx_csum(struct net_device *netdev, uint32_t data)
 }
 
 static int
-ixgb_set_tso(struct net_device *netdev, uint32_t data)
+ixgb_set_tso(struct net_device *netdev, u32 data)
 {
 	if(data)
 		netdev->features |= NETIF_F_TSO;
@@ -245,7 +236,7 @@ ixgb_set_tso(struct net_device *netdev, uint32_t data)
 	return 0;
 } 
 
-static uint32_t
+static u32
 ixgb_get_msglevel(struct net_device *netdev)
 {
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
@@ -253,7 +244,7 @@ ixgb_get_msglevel(struct net_device *netdev)
 }
 
 static void
-ixgb_set_msglevel(struct net_device *netdev, uint32_t data)
+ixgb_set_msglevel(struct net_device *netdev, u32 data)
 {
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	adapter->msg_enable = data;
@@ -263,7 +254,7 @@ ixgb_set_msglevel(struct net_device *netdev, uint32_t data)
 static int 
 ixgb_get_regs_len(struct net_device *netdev)
 {
-#define IXGB_REG_DUMP_LEN  136*sizeof(uint32_t)
+#define IXGB_REG_DUMP_LEN  136*sizeof(u32)
 	return IXGB_REG_DUMP_LEN;
 }
 
@@ -273,9 +264,9 @@ ixgb_get_regs(struct net_device *netdev,
 {
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	struct ixgb_hw *hw = &adapter->hw;
-	uint32_t *reg = p;
-	uint32_t *reg_start = reg;
-	uint8_t i;
+	u32 *reg = p;
+	u32 *reg_start = reg;
+	u8 i;
 
 	/* the 1 (one) below indicates an attempt at versioning, if the
 	 * interface in ethtool or the driver changes, this 1 should be
@@ -404,7 +395,7 @@ ixgb_get_regs(struct net_device *netdev,
 	*reg++ = IXGB_GET_STAT(adapter, xofftxc);	/* 134 */
 	*reg++ = IXGB_GET_STAT(adapter, rjc);	/* 135 */
 
-	regs->len = (reg - reg_start) * sizeof(uint32_t);
+	regs->len = (reg - reg_start) * sizeof(u32);
 }
 
 static int
@@ -416,7 +407,7 @@ ixgb_get_eeprom_len(struct net_device *netdev)
 
 static int
 ixgb_get_eeprom(struct net_device *netdev,
-		  struct ethtool_eeprom *eeprom, uint8_t *bytes)
+		  struct ethtool_eeprom *eeprom, u8 *bytes)
 {
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	struct ixgb_hw *hw = &adapter->hw;
@@ -454,7 +445,7 @@ ixgb_get_eeprom(struct net_device *netdev,
 		eeprom_buff[i] = ixgb_get_eeprom_word(hw, (first_word + i));
 	}
 
-	memcpy(bytes, (uint8_t *)eeprom_buff + (eeprom->offset & 1),
+	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
 			eeprom->len);
 	kfree(eeprom_buff);
 
@@ -464,14 +455,14 @@ geeprom_error:
 
 static int
 ixgb_set_eeprom(struct net_device *netdev,
-		  struct ethtool_eeprom *eeprom, uint8_t *bytes)
+		  struct ethtool_eeprom *eeprom, u8 *bytes)
 {
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	struct ixgb_hw *hw = &adapter->hw;
-	uint16_t *eeprom_buff;
+	u16 *eeprom_buff;
 	void *ptr;
 	int max_len, first_word, last_word;
-	uint16_t i;
+	u16 i;
 
 	if(eeprom->len == 0)
 		return -EINVAL;
@@ -570,14 +561,14 @@ ixgb_set_ringparam(struct net_device *netdev,
 		return -EINVAL;
 
 	if(netif_running(adapter->netdev))
-		ixgb_down(adapter,TRUE);
+		ixgb_down(adapter, true);
 
-	rxdr->count = max(ring->rx_pending,(uint32_t)MIN_RXD);
-	rxdr->count = min(rxdr->count,(uint32_t)MAX_RXD);
+	rxdr->count = max(ring->rx_pending,(u32)MIN_RXD);
+	rxdr->count = min(rxdr->count,(u32)MAX_RXD);
 	rxdr->count = ALIGN(rxdr->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE);
 
-	txdr->count = max(ring->tx_pending,(uint32_t)MIN_TXD);
-	txdr->count = min(txdr->count,(uint32_t)MAX_TXD);
+	txdr->count = max(ring->tx_pending,(u32)MIN_TXD);
+	txdr->count = min(txdr->count,(u32)MAX_TXD);
 	txdr->count = ALIGN(txdr->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
 
 	if(netif_running(adapter->netdev)) {
@@ -633,7 +624,7 @@ ixgb_led_blink_callback(unsigned long data)
 }
 
 static int
-ixgb_phys_id(struct net_device *netdev, uint32_t data)
+ixgb_phys_id(struct net_device *netdev, u32 data)
 {
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
 
@@ -669,7 +660,7 @@ ixgb_get_sset_count(struct net_device *netdev, int sset)
 
 static void 
 ixgb_get_ethtool_stats(struct net_device *netdev, 
-		struct ethtool_stats *stats, uint64_t *data)
+		struct ethtool_stats *stats, u64 *data)
 {
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	int i;
@@ -678,12 +669,12 @@ ixgb_get_ethtool_stats(struct net_device *netdev,
 	for(i = 0; i < IXGB_STATS_LEN; i++) {
 		char *p = (char *)adapter+ixgb_gstrings_stats[i].stat_offset;	
 		data[i] = (ixgb_gstrings_stats[i].sizeof_stat == 
-			sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
+			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
 }
 
 static void 
-ixgb_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
+ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 {
 	int i;
 

+ 99 - 100
drivers/net/ixgb/ixgb_hw.c

@@ -35,13 +35,13 @@
 
 /*  Local function prototypes */
 
-static uint32_t ixgb_hash_mc_addr(struct ixgb_hw *hw, uint8_t * mc_addr);
+static u32 ixgb_hash_mc_addr(struct ixgb_hw *hw, u8 * mc_addr);
 
-static void ixgb_mta_set(struct ixgb_hw *hw, uint32_t hash_value);
+static void ixgb_mta_set(struct ixgb_hw *hw, u32 hash_value);
 
 static void ixgb_get_bus_info(struct ixgb_hw *hw);
 
-static boolean_t ixgb_link_reset(struct ixgb_hw *hw);
+static bool ixgb_link_reset(struct ixgb_hw *hw);
 
 static void ixgb_optics_reset(struct ixgb_hw *hw);
 
@@ -55,18 +55,18 @@ static void ixgb_clear_vfta(struct ixgb_hw *hw);
 
 static void ixgb_init_rx_addrs(struct ixgb_hw *hw);
 
-static uint16_t ixgb_read_phy_reg(struct ixgb_hw *hw,
-				  uint32_t reg_address,
-				  uint32_t phy_address,
-				  uint32_t device_type);
+static u16 ixgb_read_phy_reg(struct ixgb_hw *hw,
+				  u32 reg_address,
+				  u32 phy_address,
+				  u32 device_type);
 
-static boolean_t ixgb_setup_fc(struct ixgb_hw *hw);
+static bool ixgb_setup_fc(struct ixgb_hw *hw);
 
-static boolean_t mac_addr_valid(uint8_t *mac_addr);
+static bool mac_addr_valid(u8 *mac_addr);
 
-static uint32_t ixgb_mac_reset(struct ixgb_hw *hw)
+static u32 ixgb_mac_reset(struct ixgb_hw *hw)
 {
-	uint32_t ctrl_reg;
+	u32 ctrl_reg;
 
 	ctrl_reg =  IXGB_CTRL0_RST |
 				IXGB_CTRL0_SDP3_DIR |   /* All pins are Output=1 */
@@ -114,11 +114,11 @@ static uint32_t ixgb_mac_reset(struct ixgb_hw *hw)
  *
  * hw - Struct containing variables accessed by shared code
  *****************************************************************************/
-boolean_t
+bool
 ixgb_adapter_stop(struct ixgb_hw *hw)
 {
-	uint32_t ctrl_reg;
-	uint32_t icr_reg;
+	u32 ctrl_reg;
+	u32 icr_reg;
 
 	DEBUGFUNC("ixgb_adapter_stop");
 
@@ -127,13 +127,13 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
 	 */
 	if(hw->adapter_stopped) {
 		DEBUGOUT("Exiting because the adapter is already stopped!!!\n");
-		return FALSE;
+		return false;
 	}
 
 	/* Set the Adapter Stopped flag so other driver functions stop
 	 * touching the Hardware.
 	 */
-	hw->adapter_stopped = TRUE;
+	hw->adapter_stopped = true;
 
 	/* Clear interrupt mask to stop board from generating interrupts */
 	DEBUGOUT("Masking off all interrupts\n");
@@ -179,8 +179,8 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
 static ixgb_xpak_vendor
 ixgb_identify_xpak_vendor(struct ixgb_hw *hw)
 {
-	uint32_t i;
-	uint16_t vendor_name[5];
+	u32 i;
+	u16 vendor_name[5];
 	ixgb_xpak_vendor xpak_vendor;
 
 	DEBUGFUNC("ixgb_identify_xpak_vendor");
@@ -286,15 +286,15 @@ ixgb_identify_phy(struct ixgb_hw *hw)
  * Leaves the transmit and receive units disabled and uninitialized.
  *
  * Returns:
- *      TRUE if successful,
- *      FALSE if unrecoverable problems were encountered.
+ *      true if successful,
+ *      false if unrecoverable problems were encountered.
  *****************************************************************************/
-boolean_t
+bool
 ixgb_init_hw(struct ixgb_hw *hw)
 {
-	uint32_t i;
-	uint32_t ctrl_reg;
-	boolean_t status;
+	u32 i;
+	u32 ctrl_reg;
+	bool status;
 
 	DEBUGFUNC("ixgb_init_hw");
 
@@ -318,9 +318,8 @@ ixgb_init_hw(struct ixgb_hw *hw)
 	/* Delay a few ms just to allow the reset to complete */
 	msleep(IXGB_DELAY_AFTER_EE_RESET);
 
-	if (ixgb_get_eeprom_data(hw) == FALSE) {
-		return(FALSE);
-	}
+	if (!ixgb_get_eeprom_data(hw))
+		return false;
 
 	/* Use the device id to determine the type of phy/transceiver. */
 	hw->device_id = ixgb_get_ee_device_id(hw);
@@ -337,11 +336,11 @@ ixgb_init_hw(struct ixgb_hw *hw)
 	 */
 	if (!mac_addr_valid(hw->curr_mac_addr)) {
 		DEBUGOUT("MAC address invalid after ixgb_init_rx_addrs\n");
-		return(FALSE);
+		return(false);
 	}
 
 	/* tell the routines in this file they can access hardware again */
-	hw->adapter_stopped = FALSE;
+	hw->adapter_stopped = false;
 
 	/* Fill in the bus_info structure */
 	ixgb_get_bus_info(hw);
@@ -378,7 +377,7 @@ ixgb_init_hw(struct ixgb_hw *hw)
 static void
 ixgb_init_rx_addrs(struct ixgb_hw *hw)
 {
-	uint32_t i;
+	u32 i;
 
 	DEBUGFUNC("ixgb_init_rx_addrs");
 
@@ -438,13 +437,13 @@ ixgb_init_rx_addrs(struct ixgb_hw *hw)
  *****************************************************************************/
 void
 ixgb_mc_addr_list_update(struct ixgb_hw *hw,
-			  uint8_t *mc_addr_list,
-			  uint32_t mc_addr_count,
-			  uint32_t pad)
+			  u8 *mc_addr_list,
+			  u32 mc_addr_count,
+			  u32 pad)
 {
-	uint32_t hash_value;
-	uint32_t i;
-	uint32_t rar_used_count = 1;		/* RAR[0] is used for our MAC address */
+	u32 hash_value;
+	u32 i;
+	u32 rar_used_count = 1;		/* RAR[0] is used for our MAC address */
 
 	DEBUGFUNC("ixgb_mc_addr_list_update");
 
@@ -516,11 +515,11 @@ ixgb_mc_addr_list_update(struct ixgb_hw *hw,
  * Returns:
  *      The hash value
  *****************************************************************************/
-static uint32_t
+static u32
 ixgb_hash_mc_addr(struct ixgb_hw *hw,
-		   uint8_t *mc_addr)
+		   u8 *mc_addr)
 {
-	uint32_t hash_value = 0;
+	u32 hash_value = 0;
 
 	DEBUGFUNC("ixgb_hash_mc_addr");
 
@@ -534,18 +533,18 @@ ixgb_hash_mc_addr(struct ixgb_hw *hw,
 	case 0:
 		/* [47:36] i.e. 0x563 for above example address */
 		hash_value =
-		    ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4));
+		    ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4));
 		break;
 	case 1:		/* [46:35] i.e. 0xAC6 for above example address */
 		hash_value =
-		    ((mc_addr[4] >> 3) | (((uint16_t) mc_addr[5]) << 5));
+		    ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5));
 		break;
 	case 2:		/* [45:34] i.e. 0x5D8 for above example address */
 		hash_value =
-		    ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6));
+		    ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6));
 		break;
 	case 3:		/* [43:32] i.e. 0x634 for above example address */
-		hash_value = ((mc_addr[4]) | (((uint16_t) mc_addr[5]) << 8));
+		hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8));
 		break;
 	default:
 		/* Invalid mc_filter_type, what should we do? */
@@ -566,10 +565,10 @@ ixgb_hash_mc_addr(struct ixgb_hw *hw,
  *****************************************************************************/
 static void
 ixgb_mta_set(struct ixgb_hw *hw,
-		  uint32_t hash_value)
+		  u32 hash_value)
 {
-	uint32_t hash_bit, hash_reg;
-	uint32_t mta_reg;
+	u32 hash_bit, hash_reg;
+	u32 mta_reg;
 
 	/* The MTA is a register array of 128 32-bit registers.
 	 * It is treated like an array of 4096 bits.  We want to set
@@ -600,23 +599,23 @@ ixgb_mta_set(struct ixgb_hw *hw,
  *****************************************************************************/
 void
 ixgb_rar_set(struct ixgb_hw *hw,
-		  uint8_t *addr,
-		  uint32_t index)
+		  u8 *addr,
+		  u32 index)
 {
-	uint32_t rar_low, rar_high;
+	u32 rar_low, rar_high;
 
 	DEBUGFUNC("ixgb_rar_set");
 
 	/* HW expects these in little endian so we reverse the byte order
 	 * from network order (big endian) to little endian
 	 */
-	rar_low = ((uint32_t) addr[0] |
-		   ((uint32_t)addr[1] << 8) |
-		   ((uint32_t)addr[2] << 16) |
-		   ((uint32_t)addr[3] << 24));
+	rar_low = ((u32) addr[0] |
+		   ((u32)addr[1] << 8) |
+		   ((u32)addr[2] << 16) |
+		   ((u32)addr[3] << 24));
 
-	rar_high = ((uint32_t) addr[4] |
-			((uint32_t)addr[5] << 8) |
+	rar_high = ((u32) addr[4] |
+			((u32)addr[5] << 8) |
 			IXGB_RAH_AV);
 
 	IXGB_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
@@ -633,8 +632,8 @@ ixgb_rar_set(struct ixgb_hw *hw,
  *****************************************************************************/
 void
 ixgb_write_vfta(struct ixgb_hw *hw,
-		 uint32_t offset,
-		 uint32_t value)
+		 u32 offset,
+		 u32 value)
 {
 	IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, value);
 	return;
@@ -648,7 +647,7 @@ ixgb_write_vfta(struct ixgb_hw *hw,
 static void
 ixgb_clear_vfta(struct ixgb_hw *hw)
 {
-	uint32_t offset;
+	u32 offset;
 
 	for(offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++)
 		IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0);
@@ -661,12 +660,12 @@ ixgb_clear_vfta(struct ixgb_hw *hw)
  * hw - Struct containing variables accessed by shared code
  *****************************************************************************/
 
-static boolean_t
+static bool
 ixgb_setup_fc(struct ixgb_hw *hw)
 {
-	uint32_t ctrl_reg;
-	uint32_t pap_reg = 0;   /* by default, assume no pause time */
-	boolean_t status = TRUE;
+	u32 ctrl_reg;
+	u32 pap_reg = 0;   /* by default, assume no pause time */
+	bool status = true;
 
 	DEBUGFUNC("ixgb_setup_fc");
 
@@ -763,15 +762,15 @@ ixgb_setup_fc(struct ixgb_hw *hw)
  * This requires that first an address cycle command is sent, followed by a
  * read command.
  *****************************************************************************/
-static uint16_t
+static u16
 ixgb_read_phy_reg(struct ixgb_hw *hw,
-		uint32_t reg_address,
-		uint32_t phy_address,
-		uint32_t device_type)
+		u32 reg_address,
+		u32 phy_address,
+		u32 device_type)
 {
-	uint32_t i;
-	uint32_t data;
-	uint32_t command = 0;
+	u32 i;
+	u32 data;
+	u32 command = 0;
 
 	ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS);
 	ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS);
@@ -836,7 +835,7 @@ ixgb_read_phy_reg(struct ixgb_hw *hw,
 	 */
 	data = IXGB_READ_REG(hw, MSRWD);
 	data >>= IXGB_MSRWD_READ_DATA_SHIFT;
-	return((uint16_t) data);
+	return((u16) data);
 }
 
 /******************************************************************************
@@ -858,20 +857,20 @@ ixgb_read_phy_reg(struct ixgb_hw *hw,
  *****************************************************************************/
 static void
 ixgb_write_phy_reg(struct ixgb_hw *hw,
-			uint32_t reg_address,
-			uint32_t phy_address,
-			uint32_t device_type,
-			uint16_t data)
+			u32 reg_address,
+			u32 phy_address,
+			u32 device_type,
+			u16 data)
 {
-	uint32_t i;
-	uint32_t command = 0;
+	u32 i;
+	u32 command = 0;
 
 	ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS);
 	ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS);
 	ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE);
 
 	/* Put the data in the MDIO Read/Write Data register */
-	IXGB_WRITE_REG(hw, MSRWD, (uint32_t)data);
+	IXGB_WRITE_REG(hw, MSRWD, (u32)data);
 
 	/* Setup and write the address cycle command */
 	command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT)  |
@@ -940,8 +939,8 @@ ixgb_write_phy_reg(struct ixgb_hw *hw,
 void
 ixgb_check_for_link(struct ixgb_hw *hw)
 {
-	uint32_t status_reg;
-	uint32_t xpcss_reg;
+	u32 status_reg;
+	u32 xpcss_reg;
 
 	DEBUGFUNC("ixgb_check_for_link");
 
@@ -950,7 +949,7 @@ ixgb_check_for_link(struct ixgb_hw *hw)
 
 	if ((xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
 	    (status_reg & IXGB_STATUS_LU)) {
-		hw->link_up = TRUE;
+		hw->link_up = true;
 	} else if (!(xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
 		   (status_reg & IXGB_STATUS_LU)) {
 		DEBUGOUT("XPCSS Not Aligned while Status:LU is set.\n");
@@ -974,10 +973,10 @@ ixgb_check_for_link(struct ixgb_hw *hw)
  *
  * Called by any function that needs to check the link status of the adapter.
  *****************************************************************************/
-boolean_t ixgb_check_for_bad_link(struct ixgb_hw *hw)
+bool ixgb_check_for_bad_link(struct ixgb_hw *hw)
 {
-	uint32_t newLFC, newRFC;
-	boolean_t bad_link_returncode = FALSE;
+	u32 newLFC, newRFC;
+	bool bad_link_returncode = false;
 
 	if (hw->phy_type == ixgb_phy_type_txn17401) {
 		newLFC = IXGB_READ_REG(hw, LFC);
@@ -986,7 +985,7 @@ boolean_t ixgb_check_for_bad_link(struct ixgb_hw *hw)
 		    || (hw->lastRFC + 250 < newRFC)) {
 			DEBUGOUT
 			    ("BAD LINK! too many LFC/RFC since last check\n");
-			bad_link_returncode = TRUE;
+			bad_link_returncode = true;
 		}
 		hw->lastLFC = newLFC;
 		hw->lastRFC = newRFC;
@@ -1003,7 +1002,7 @@ boolean_t ixgb_check_for_bad_link(struct ixgb_hw *hw)
 static void
 ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
 {
-	volatile uint32_t temp_reg;
+	volatile u32 temp_reg;
 
 	DEBUGFUNC("ixgb_clear_hw_cntrs");
 
@@ -1084,7 +1083,7 @@ ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
 void
 ixgb_led_on(struct ixgb_hw *hw)
 {
-	uint32_t ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
+	u32 ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
 
 	/* To turn on the LED, clear software-definable pin 0 (SDP0). */
 	ctrl0_reg &= ~IXGB_CTRL0_SDP0;
@@ -1100,7 +1099,7 @@ ixgb_led_on(struct ixgb_hw *hw)
 void
 ixgb_led_off(struct ixgb_hw *hw)
 {
-	uint32_t ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
+	u32 ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
 
 	/* To turn off the LED, set software-definable pin 0 (SDP0). */
 	ctrl0_reg |= IXGB_CTRL0_SDP0;
@@ -1116,7 +1115,7 @@ ixgb_led_off(struct ixgb_hw *hw)
 static void
 ixgb_get_bus_info(struct ixgb_hw *hw)
 {
-	uint32_t status_reg;
+	u32 status_reg;
 
 	status_reg = IXGB_READ_REG(hw, STATUS);
 
@@ -1155,21 +1154,21 @@ ixgb_get_bus_info(struct ixgb_hw *hw)
  * mac_addr - pointer to MAC address.
  *
  *****************************************************************************/
-static boolean_t
-mac_addr_valid(uint8_t *mac_addr)
+static bool
+mac_addr_valid(u8 *mac_addr)
 {
-	boolean_t is_valid = TRUE;
+	bool is_valid = true;
 	DEBUGFUNC("mac_addr_valid");
 
 	/* Make sure it is not a multicast address */
 	if (IS_MULTICAST(mac_addr)) {
 		DEBUGOUT("MAC address is multicast\n");
-		is_valid = FALSE;
+		is_valid = false;
 	}
 	/* Not a broadcast address */
 	else if (IS_BROADCAST(mac_addr)) {
 		DEBUGOUT("MAC address is broadcast\n");
-		is_valid = FALSE;
+		is_valid = false;
 	}
 	/* Reject the zero address */
 	else if (mac_addr[0] == 0 &&
@@ -1179,7 +1178,7 @@ mac_addr_valid(uint8_t *mac_addr)
 			 mac_addr[4] == 0 &&
 			 mac_addr[5] == 0) {
 		DEBUGOUT("MAC address is all zeros\n");
-		is_valid = FALSE;
+		is_valid = false;
 	}
 	return (is_valid);
 }
@@ -1190,12 +1189,12 @@ mac_addr_valid(uint8_t *mac_addr)
  *
  * hw - Struct containing variables accessed by shared code
  *****************************************************************************/
-static boolean_t
+static bool
 ixgb_link_reset(struct ixgb_hw *hw)
 {
-	boolean_t link_status = FALSE;
-	uint8_t wait_retries = MAX_RESET_ITERATIONS;
-	uint8_t lrst_retries = MAX_RESET_ITERATIONS;
+	bool link_status = false;
+	u8 wait_retries = MAX_RESET_ITERATIONS;
+	u8 lrst_retries = MAX_RESET_ITERATIONS;
 
 	do {
 		/* Reset the link */
@@ -1208,7 +1207,7 @@ ixgb_link_reset(struct ixgb_hw *hw)
 			link_status =
 			    ((IXGB_READ_REG(hw, STATUS) & IXGB_STATUS_LU)
 			     && (IXGB_READ_REG(hw, XPCSS) &
-				 IXGB_XPCSS_ALIGN_STATUS)) ? TRUE : FALSE;
+				 IXGB_XPCSS_ALIGN_STATUS)) ? true : false;
 		} while (!link_status && --wait_retries);
 
 	} while (!link_status && --lrst_retries);
@@ -1225,7 +1224,7 @@ static void
 ixgb_optics_reset(struct ixgb_hw *hw)
 {
 	if (hw->phy_type == ixgb_phy_type_txn17401) {
-		uint16_t mdio_reg;
+		u16 mdio_reg;
 
 		ixgb_write_phy_reg(hw,
 					MDIO_PMA_PMD_CR1,

+ 125 - 125
drivers/net/ixgb/ixgb_hw.h

@@ -538,8 +538,8 @@ struct ixgb_rx_desc {
 	__le64 buff_addr;
 	__le16 length;
 	__le16 reserved;
-	uint8_t status;
-	uint8_t errors;
+	u8 status;
+	u8 errors;
 	__le16 special;
 };
 
@@ -570,8 +570,8 @@ struct ixgb_rx_desc {
 struct ixgb_tx_desc {
 	__le64 buff_addr;
 	__le32 cmd_type_len;
-	uint8_t status;
-	uint8_t popts;
+	u8 status;
+	u8 popts;
 	__le16 vlan;
 };
 
@@ -595,15 +595,15 @@ struct ixgb_tx_desc {
 #define IXGB_TX_DESC_SPECIAL_PRI_SHIFT  IXGB_RX_DESC_SPECIAL_PRI_SHIFT	/* Priority is in upper 3 of 16 */
 
 struct ixgb_context_desc {
-	uint8_t ipcss;
-	uint8_t ipcso;
+	u8 ipcss;
+	u8 ipcso;
 	__le16 ipcse;
-	uint8_t tucss;
-	uint8_t tucso;
+	u8 tucss;
+	u8 tucso;
 	__le16 tucse;
 	__le32 cmd_type_len;
-	uint8_t status;
-	uint8_t hdr_len;
+	u8 status;
+	u8 hdr_len;
 	__le16 mss;
 };
 
@@ -637,33 +637,33 @@ struct ixgb_context_desc {
 
 /* This structure takes a 64k flash and maps it for identification commands */
 struct ixgb_flash_buffer {
-	uint8_t manufacturer_id;
-	uint8_t device_id;
-	uint8_t filler1[0x2AA8];
-	uint8_t cmd2;
-	uint8_t filler2[0x2AAA];
-	uint8_t cmd1;
-	uint8_t filler3[0xAAAA];
+	u8 manufacturer_id;
+	u8 device_id;
+	u8 filler1[0x2AA8];
+	u8 cmd2;
+	u8 filler2[0x2AAA];
+	u8 cmd1;
+	u8 filler3[0xAAAA];
 };
 
 /*
  * This is a little-endian specific check.
  */
 #define IS_MULTICAST(Address) \
-    (boolean_t)(((uint8_t *)(Address))[0] & ((uint8_t)0x01))
+    (bool)(((u8 *)(Address))[0] & ((u8)0x01))
 
 /*
  * Check whether an address is broadcast.
  */
 #define IS_BROADCAST(Address)               \
-    ((((uint8_t *)(Address))[0] == ((uint8_t)0xff)) && (((uint8_t *)(Address))[1] == ((uint8_t)0xff)))
+    ((((u8 *)(Address))[0] == ((u8)0xff)) && (((u8 *)(Address))[1] == ((u8)0xff)))
 
 /* Flow control parameters */
 struct ixgb_fc {
-	uint32_t high_water;	/* Flow Control High-water          */
-	uint32_t low_water;	/* Flow Control Low-water           */
-	uint16_t pause_time;	/* Flow Control Pause timer         */
-	boolean_t send_xon;	/* Flow control send XON            */
+	u32 high_water;	/* Flow Control High-water          */
+	u32 low_water;	/* Flow Control Low-water           */
+	u16 pause_time;	/* Flow Control Pause timer         */
+	bool send_xon;		/* Flow control send XON            */
 	ixgb_fc_type type;	/* Type of flow control             */
 };
 
@@ -685,139 +685,139 @@ struct ixgb_bus {
 };
 
 struct ixgb_hw {
-	uint8_t __iomem *hw_addr;/* Base Address of the hardware     */
+	u8 __iomem *hw_addr;/* Base Address of the hardware     */
 	void *back;		/* Pointer to OS-dependent struct   */
 	struct ixgb_fc fc;	/* Flow control parameters          */
 	struct ixgb_bus bus;	/* Bus parameters                   */
-	uint32_t phy_id;	/* Phy Identifier                   */
-	uint32_t phy_addr;	/* XGMII address of Phy             */
+	u32 phy_id;	/* Phy Identifier                   */
+	u32 phy_addr;	/* XGMII address of Phy             */
 	ixgb_mac_type mac_type;	/* Identifier for MAC controller    */
 	ixgb_phy_type phy_type;	/* Transceiver/phy identifier       */
-	uint32_t max_frame_size;	/* Maximum frame size supported     */
-	uint32_t mc_filter_type;	/* Multicast filter hash type       */
-	uint32_t num_mc_addrs;	/* Number of current Multicast addrs */
-	uint8_t curr_mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS];	/* Individual address currently programmed in MAC */
-	uint32_t num_tx_desc;	/* Number of Transmit descriptors   */
-	uint32_t num_rx_desc;	/* Number of Receive descriptors    */
-	uint32_t rx_buffer_size;	/* Size of Receive buffer           */
-	boolean_t link_up;	/* TRUE if link is valid            */
-	boolean_t adapter_stopped;	/* State of adapter                 */
-	uint16_t device_id;	/* device id from PCI configuration space */
-	uint16_t vendor_id;	/* vendor id from PCI configuration space */
-	uint8_t revision_id;	/* revision id from PCI configuration space */
-	uint16_t subsystem_vendor_id;	/* subsystem vendor id from PCI configuration space */
-	uint16_t subsystem_id;	/* subsystem id from PCI configuration space */
-	uint32_t bar0;		/* Base Address registers           */
-	uint32_t bar1;
-	uint32_t bar2;
-	uint32_t bar3;
-	uint16_t pci_cmd_word;	/* PCI command register id from PCI configuration space */
+	u32 max_frame_size;	/* Maximum frame size supported     */
+	u32 mc_filter_type;	/* Multicast filter hash type       */
+	u32 num_mc_addrs;	/* Number of current Multicast addrs */
+	u8 curr_mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS];	/* Individual address currently programmed in MAC */
+	u32 num_tx_desc;	/* Number of Transmit descriptors   */
+	u32 num_rx_desc;	/* Number of Receive descriptors    */
+	u32 rx_buffer_size;	/* Size of Receive buffer           */
+	bool link_up;		/* true if link is valid            */
+	bool adapter_stopped;	/* State of adapter                 */
+	u16 device_id;	/* device id from PCI configuration space */
+	u16 vendor_id;	/* vendor id from PCI configuration space */
+	u8 revision_id;	/* revision id from PCI configuration space */
+	u16 subsystem_vendor_id;	/* subsystem vendor id from PCI configuration space */
+	u16 subsystem_id;	/* subsystem id from PCI configuration space */
+	u32 bar0;		/* Base Address registers           */
+	u32 bar1;
+	u32 bar2;
+	u32 bar3;
+	u16 pci_cmd_word;	/* PCI command register id from PCI configuration space */
 	__le16 eeprom[IXGB_EEPROM_SIZE];	/* EEPROM contents read at init time  */
 	unsigned long io_base;	/* Our I/O mapped location */
-	uint32_t lastLFC;
-	uint32_t lastRFC;
+	u32 lastLFC;
+	u32 lastRFC;
 };
 
 /* Statistics reported by the hardware */
 struct ixgb_hw_stats {
-	uint64_t tprl;
-	uint64_t tprh;
-	uint64_t gprcl;
-	uint64_t gprch;
-	uint64_t bprcl;
-	uint64_t bprch;
-	uint64_t mprcl;
-	uint64_t mprch;
-	uint64_t uprcl;
-	uint64_t uprch;
-	uint64_t vprcl;
-	uint64_t vprch;
-	uint64_t jprcl;
-	uint64_t jprch;
-	uint64_t gorcl;
-	uint64_t gorch;
-	uint64_t torl;
-	uint64_t torh;
-	uint64_t rnbc;
-	uint64_t ruc;
-	uint64_t roc;
-	uint64_t rlec;
-	uint64_t crcerrs;
-	uint64_t icbc;
-	uint64_t ecbc;
-	uint64_t mpc;
-	uint64_t tptl;
-	uint64_t tpth;
-	uint64_t gptcl;
-	uint64_t gptch;
-	uint64_t bptcl;
-	uint64_t bptch;
-	uint64_t mptcl;
-	uint64_t mptch;
-	uint64_t uptcl;
-	uint64_t uptch;
-	uint64_t vptcl;
-	uint64_t vptch;
-	uint64_t jptcl;
-	uint64_t jptch;
-	uint64_t gotcl;
-	uint64_t gotch;
-	uint64_t totl;
-	uint64_t toth;
-	uint64_t dc;
-	uint64_t plt64c;
-	uint64_t tsctc;
-	uint64_t tsctfc;
-	uint64_t ibic;
-	uint64_t rfc;
-	uint64_t lfc;
-	uint64_t pfrc;
-	uint64_t pftc;
-	uint64_t mcfrc;
-	uint64_t mcftc;
-	uint64_t xonrxc;
-	uint64_t xontxc;
-	uint64_t xoffrxc;
-	uint64_t xofftxc;
-	uint64_t rjc;
+	u64 tprl;
+	u64 tprh;
+	u64 gprcl;
+	u64 gprch;
+	u64 bprcl;
+	u64 bprch;
+	u64 mprcl;
+	u64 mprch;
+	u64 uprcl;
+	u64 uprch;
+	u64 vprcl;
+	u64 vprch;
+	u64 jprcl;
+	u64 jprch;
+	u64 gorcl;
+	u64 gorch;
+	u64 torl;
+	u64 torh;
+	u64 rnbc;
+	u64 ruc;
+	u64 roc;
+	u64 rlec;
+	u64 crcerrs;
+	u64 icbc;
+	u64 ecbc;
+	u64 mpc;
+	u64 tptl;
+	u64 tpth;
+	u64 gptcl;
+	u64 gptch;
+	u64 bptcl;
+	u64 bptch;
+	u64 mptcl;
+	u64 mptch;
+	u64 uptcl;
+	u64 uptch;
+	u64 vptcl;
+	u64 vptch;
+	u64 jptcl;
+	u64 jptch;
+	u64 gotcl;
+	u64 gotch;
+	u64 totl;
+	u64 toth;
+	u64 dc;
+	u64 plt64c;
+	u64 tsctc;
+	u64 tsctfc;
+	u64 ibic;
+	u64 rfc;
+	u64 lfc;
+	u64 pfrc;
+	u64 pftc;
+	u64 mcfrc;
+	u64 mcftc;
+	u64 xonrxc;
+	u64 xontxc;
+	u64 xoffrxc;
+	u64 xofftxc;
+	u64 rjc;
 };
 
 /* Function Prototypes */
-extern boolean_t ixgb_adapter_stop(struct ixgb_hw *hw);
-extern boolean_t ixgb_init_hw(struct ixgb_hw *hw);
-extern boolean_t ixgb_adapter_start(struct ixgb_hw *hw);
+extern bool ixgb_adapter_stop(struct ixgb_hw *hw);
+extern bool ixgb_init_hw(struct ixgb_hw *hw);
+extern bool ixgb_adapter_start(struct ixgb_hw *hw);
 extern void ixgb_check_for_link(struct ixgb_hw *hw);
-extern boolean_t ixgb_check_for_bad_link(struct ixgb_hw *hw);
+extern bool ixgb_check_for_bad_link(struct ixgb_hw *hw);
 
 extern void ixgb_rar_set(struct ixgb_hw *hw,
-				uint8_t *addr,
-				uint32_t index);
+				u8 *addr,
+				u32 index);
 
 
 /* Filters (multicast, vlan, receive) */
 extern void ixgb_mc_addr_list_update(struct ixgb_hw *hw,
-				   uint8_t *mc_addr_list,
-				   uint32_t mc_addr_count,
-				   uint32_t pad);
+				   u8 *mc_addr_list,
+				   u32 mc_addr_count,
+				   u32 pad);
 
 /* Vfta functions */
 extern void ixgb_write_vfta(struct ixgb_hw *hw,
-				 uint32_t offset,
-				 uint32_t value);
+				 u32 offset,
+				 u32 value);
 
 /* Access functions to eeprom data */
-void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, uint8_t *mac_addr);
-uint32_t ixgb_get_ee_pba_number(struct ixgb_hw *hw);
-uint16_t ixgb_get_ee_device_id(struct ixgb_hw *hw);
-boolean_t ixgb_get_eeprom_data(struct ixgb_hw *hw);
-__le16 ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index);
+void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, u8 *mac_addr);
+u32 ixgb_get_ee_pba_number(struct ixgb_hw *hw);
+u16 ixgb_get_ee_device_id(struct ixgb_hw *hw);
+bool ixgb_get_eeprom_data(struct ixgb_hw *hw);
+__le16 ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index);
 
 /* Everything else */
 void ixgb_led_on(struct ixgb_hw *hw);
 void ixgb_led_off(struct ixgb_hw *hw);
 void ixgb_write_pci_cfg(struct ixgb_hw *hw,
-			 uint32_t reg,
-			 uint16_t * value);
+			 u32 reg,
+			 u16 * value);
 
 
 #endif /* _IXGB_HW_H_ */

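A minimal sketch, not part of the patch, of why the counters in ixgb_hw_stats come as low/high pairs (gorcl/gorch and friends): the MAC exposes each 64-bit statistic as two 32-bit registers that the driver folds together. The IXGB_READ_REG macro and the GORCL/GORCH register names are assumed to match the driver's existing definitions; the helper itself is hypothetical.

static u64 ixgb_example_read_gorc(struct ixgb_hw *hw)
{
	/* assumes the ixgb_hw.h register macros are in scope */
	u64 low  = IXGB_READ_REG(hw, GORCL);	/* low 32 bits of the counter  */
	u64 high = IXGB_READ_REG(hw, GORCH);	/* high 32 bits of the counter */

	return (high << 32) | low;
}
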
+ 82 - 74
drivers/net/ixgb/ixgb_main.c

@@ -67,7 +67,7 @@ MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
 /* Local Function Prototypes */
 
 int ixgb_up(struct ixgb_adapter *adapter);
-void ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog);
+void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
 void ixgb_reset(struct ixgb_adapter *adapter);
 int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
 int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
@@ -94,22 +94,22 @@ static struct net_device_stats *ixgb_get_stats(struct net_device *netdev);
 static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
 static int ixgb_set_mac(struct net_device *netdev, void *p);
 static irqreturn_t ixgb_intr(int irq, void *data);
-static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
+static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
 
 #ifdef CONFIG_IXGB_NAPI
 static int ixgb_clean(struct napi_struct *napi, int budget);
-static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter,
-				   int *work_done, int work_to_do);
+static bool ixgb_clean_rx_irq(struct ixgb_adapter *adapter,
+			      int *work_done, int work_to_do);
 #else
-static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter);
+static bool ixgb_clean_rx_irq(struct ixgb_adapter *adapter);
 #endif
 static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter);
 static void ixgb_tx_timeout(struct net_device *dev);
 static void ixgb_tx_timeout_task(struct work_struct *work);
 static void ixgb_vlan_rx_register(struct net_device *netdev,
 				  struct vlan_group *grp);
-static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
-static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
+static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
 static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -197,7 +197,6 @@ module_exit(ixgb_exit_module);
 static void
 ixgb_irq_disable(struct ixgb_adapter *adapter)
 {
-	atomic_inc(&adapter->irq_sem);
 	IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
 	IXGB_WRITE_FLUSH(&adapter->hw);
 	synchronize_irq(adapter->pdev->irq);
@@ -211,14 +210,12 @@ ixgb_irq_disable(struct ixgb_adapter *adapter)
 static void
 ixgb_irq_enable(struct ixgb_adapter *adapter)
 {
-	if(atomic_dec_and_test(&adapter->irq_sem)) {
-		u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 |
-			  IXGB_INT_TXDW | IXGB_INT_LSC;
-		if (adapter->hw.subsystem_vendor_id == SUN_SUBVENDOR_ID)
-			val |= IXGB_INT_GPI0;
-		IXGB_WRITE_REG(&adapter->hw, IMS, val);
-		IXGB_WRITE_FLUSH(&adapter->hw);
-	}
+	u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 |
+		  IXGB_INT_TXDW | IXGB_INT_LSC;
+	if (adapter->hw.subsystem_vendor_id == SUN_SUBVENDOR_ID)
+		val |= IXGB_INT_GPI0;
+	IXGB_WRITE_REG(&adapter->hw, IMS, val);
+	IXGB_WRITE_FLUSH(&adapter->hw);
 }
 
 int
@@ -274,7 +271,7 @@ ixgb_up(struct ixgb_adapter *adapter)
 
 		if(hw->max_frame_size >
 		   IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
-			uint32_t ctrl0 = IXGB_READ_REG(hw, CTRL0);
+			u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
 
 			if(!(ctrl0 & IXGB_CTRL0_JFE)) {
 				ctrl0 |= IXGB_CTRL0_JFE;
@@ -283,26 +280,30 @@ ixgb_up(struct ixgb_adapter *adapter)
 		}
 	}
 
-	mod_timer(&adapter->watchdog_timer, jiffies);
+	clear_bit(__IXGB_DOWN, &adapter->flags);
 
 #ifdef CONFIG_IXGB_NAPI
 	napi_enable(&adapter->napi);
 #endif
 	ixgb_irq_enable(adapter);
 
+	mod_timer(&adapter->watchdog_timer, jiffies);
+
 	return 0;
 }
 
 void
-ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog)
+ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
 {
 	struct net_device *netdev = adapter->netdev;
 
+	/* prevent the interrupt handler from restarting watchdog */
+	set_bit(__IXGB_DOWN, &adapter->flags);
+
 #ifdef CONFIG_IXGB_NAPI
 	napi_disable(&adapter->napi);
-	atomic_set(&adapter->irq_sem, 0);
 #endif
-
+	/* waiting for NAPI to complete can re-enable interrupts */
 	ixgb_irq_disable(adapter);
 	free_irq(adapter->pdev->irq, netdev);
 
@@ -589,9 +590,9 @@ ixgb_sw_init(struct ixgb_adapter *adapter)
 	/* enable flow control to be programmed */
 	hw->fc.send_xon = 1;
 
-	atomic_set(&adapter->irq_sem, 1);
 	spin_lock_init(&adapter->tx_lock);
 
+	set_bit(__IXGB_DOWN, &adapter->flags);
 	return 0;
 }
 
@@ -656,7 +657,7 @@ ixgb_close(struct net_device *netdev)
 {
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
 
-	ixgb_down(adapter, TRUE);
+	ixgb_down(adapter, true);
 
 	ixgb_free_tx_resources(adapter);
 	ixgb_free_rx_resources(adapter);
@@ -717,9 +718,9 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
 static void
 ixgb_configure_tx(struct ixgb_adapter *adapter)
 {
-	uint64_t tdba = adapter->tx_ring.dma;
-	uint32_t tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
-	uint32_t tctl;
+	u64 tdba = adapter->tx_ring.dma;
+	u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
+	u32 tctl;
 	struct ixgb_hw *hw = &adapter->hw;
 
 	/* Setup the Base and Length of the Tx Descriptor Ring 
@@ -805,7 +806,7 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
 static void
 ixgb_setup_rctl(struct ixgb_adapter *adapter)
 {
-	uint32_t rctl;
+	u32 rctl;
 
 	rctl = IXGB_READ_REG(&adapter->hw, RCTL);
 
@@ -840,12 +841,12 @@ ixgb_setup_rctl(struct ixgb_adapter *adapter)
 static void
 ixgb_configure_rx(struct ixgb_adapter *adapter)
 {
-	uint64_t rdba = adapter->rx_ring.dma;
-	uint32_t rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
+	u64 rdba = adapter->rx_ring.dma;
+	u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
 	struct ixgb_hw *hw = &adapter->hw;
-	uint32_t rctl;
-	uint32_t rxcsum;
-	uint32_t rxdctl;
+	u32 rctl;
+	u32 rxcsum;
+	u32 rxdctl;
 
 	/* make sure receives are disabled while setting up the descriptors */
 
@@ -881,7 +882,7 @@ ixgb_configure_rx(struct ixgb_adapter *adapter)
 	IXGB_WRITE_REG(hw, RXDCTL, rxdctl);
 
 	/* Enable Receive Checksum Offload for TCP and UDP */
-	if(adapter->rx_csum == TRUE) {
+	if (adapter->rx_csum) {
 		rxcsum = IXGB_READ_REG(hw, RXCSUM);
 		rxcsum |= IXGB_RXCSUM_TUOFL;
 		IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
@@ -1078,7 +1079,7 @@ ixgb_set_multi(struct net_device *netdev)
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	struct ixgb_hw *hw = &adapter->hw;
 	struct dev_mc_list *mc_ptr;
-	uint32_t rctl;
+	u32 rctl;
 	int i;
 
 	/* Check for Promiscuous and All Multicast modes */
@@ -1098,7 +1099,7 @@ ixgb_set_multi(struct net_device *netdev)
 		rctl |= IXGB_RCTL_MPE;
 		IXGB_WRITE_REG(hw, RCTL, rctl);
 	} else {
-		uint8_t mta[IXGB_MAX_NUM_MULTICAST_ADDRESSES *
+		u8 mta[IXGB_MAX_NUM_MULTICAST_ADDRESSES *
 			    IXGB_ETH_LENGTH_OF_ADDRESS];
 
 		IXGB_WRITE_REG(hw, RCTL, rctl);
@@ -1164,7 +1165,7 @@ ixgb_watchdog(unsigned long data)
 	}
 
 	/* Force detection of hung controller every watchdog period */
-	adapter->detect_tx_hung = TRUE;
+	adapter->detect_tx_hung = true;
 
 	/* generate an interrupt to force clean up of any stragglers */
 	IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);
@@ -1182,8 +1183,8 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
 {
 	struct ixgb_context_desc *context_desc;
 	unsigned int i;
-	uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
-	uint16_t ipcse, tucse, mss;
+	u8 ipcss, ipcso, tucss, tucso, hdr_len;
+	u16 ipcse, tucse, mss;
 	int err;
 
 	if (likely(skb_is_gso(skb))) {
@@ -1243,12 +1244,12 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
 	return 0;
 }
 
-static boolean_t
+static bool
 ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
 {
 	struct ixgb_context_desc *context_desc;
 	unsigned int i;
-	uint8_t css, cso;
+	u8 css, cso;
 
 	if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
 		struct ixgb_buffer *buffer_info;
@@ -1264,7 +1265,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
 		context_desc->tucso = cso;
 		context_desc->tucse = 0;
 		/* zero out any previously existing data in one instruction */
-		*(uint32_t *)&(context_desc->ipcss) = 0;
+		*(u32 *)&(context_desc->ipcss) = 0;
 		context_desc->status = 0;
 		context_desc->hdr_len = 0;
 		context_desc->mss = 0;
@@ -1275,10 +1276,10 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
 		if(++i == adapter->tx_ring.count) i = 0;
 		adapter->tx_ring.next_to_use = i;
 
-		return TRUE;
+		return true;
 	}
 
-	return FALSE;
+	return false;
 }
 
 #define IXGB_MAX_TXD_PWR	14
@@ -1371,9 +1372,9 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
 	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
 	struct ixgb_tx_desc *tx_desc = NULL;
 	struct ixgb_buffer *buffer_info;
-	uint32_t cmd_type_len = adapter->tx_cmd_type;
-	uint8_t status = 0;
-	uint8_t popts = 0;
+	u32 cmd_type_len = adapter->tx_cmd_type;
+	u8 status = 0;
+	u8 popts = 0;
 	unsigned int i;
 
 	if(tx_flags & IXGB_TX_FLAGS_TSO) {
@@ -1464,14 +1465,18 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	int vlan_id = 0;
 	int tso;
 
+	if (test_bit(__IXGB_DOWN, &adapter->flags)) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
 	if(skb->len <= 0) {
 		dev_kfree_skb_any(skb);
 		return 0;
 	}
 
 #ifdef NETIF_F_LLTX
-	local_irq_save(flags);
-	if (!spin_trylock(&adapter->tx_lock)) {
+	if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) {
 		/* Collision - tell upper layer to requeue */
 		local_irq_restore(flags);
 		return NETDEV_TX_LOCKED;
@@ -1548,7 +1553,7 @@ ixgb_tx_timeout_task(struct work_struct *work)
 		container_of(work, struct ixgb_adapter, tx_timeout_task);
 
 	adapter->tx_timeout_count++;
-	ixgb_down(adapter, TRUE);
+	ixgb_down(adapter, true);
 	ixgb_up(adapter);
 }
 
@@ -1595,7 +1600,7 @@ ixgb_change_mtu(struct net_device *netdev, int new_mtu)
 	netdev->mtu = new_mtu;
 
 	if ((old_max_frame != max_frame) && netif_running(netdev)) {
-		ixgb_down(adapter, TRUE);
+		ixgb_down(adapter, true);
 		ixgb_up(adapter);
 	}
 
@@ -1745,7 +1750,7 @@ ixgb_intr(int irq, void *data)
 	struct net_device *netdev = data;
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	struct ixgb_hw *hw = &adapter->hw;
-	uint32_t icr = IXGB_READ_REG(hw, ICR);
+	u32 icr = IXGB_READ_REG(hw, ICR);
 #ifndef CONFIG_IXGB_NAPI
 	unsigned int i;
 #endif
@@ -1753,9 +1758,9 @@ ixgb_intr(int irq, void *data)
 	if(unlikely(!icr))
 		return IRQ_NONE;  /* Not our interrupt */
 
-	if(unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC))) {
-		mod_timer(&adapter->watchdog_timer, jiffies);
-	}
+	if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)))
+		if (!test_bit(__IXGB_DOWN, &adapter->flags))
+			mod_timer(&adapter->watchdog_timer, jiffies);
 
 #ifdef CONFIG_IXGB_NAPI
 	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
@@ -1764,7 +1769,6 @@ ixgb_intr(int irq, void *data)
 		  of the posted write is intentionally left out.
 		*/
 
-		atomic_inc(&adapter->irq_sem);
 		IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
 		__netif_rx_schedule(netdev, &adapter->napi);
 	}
@@ -1812,7 +1816,7 @@ ixgb_clean(struct napi_struct *napi, int budget)
  * @adapter: board private structure
  **/
 
-static boolean_t
+static bool
 ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
 {
 	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
@@ -1820,7 +1824,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
 	struct ixgb_tx_desc *tx_desc, *eop_desc;
 	struct ixgb_buffer *buffer_info;
 	unsigned int i, eop;
-	boolean_t cleaned = FALSE;
+	bool cleaned = false;
 
 	i = tx_ring->next_to_clean;
 	eop = tx_ring->buffer_info[i].next_to_watch;
@@ -1828,7 +1832,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
 
 	while(eop_desc->status & IXGB_TX_DESC_STATUS_DD) {
 
-		for(cleaned = FALSE; !cleaned; ) {
+		for (cleaned = false; !cleaned; ) {
 			tx_desc = IXGB_TX_DESC(*tx_ring, i);
 			buffer_info = &tx_ring->buffer_info[i];
 
@@ -1839,7 +1843,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
 
 			ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
 
-			*(uint32_t *)&(tx_desc->status) = 0;
+			*(u32 *)&(tx_desc->status) = 0;
 
 			cleaned = (i == eop);
 			if(++i == tx_ring->count) i = 0;
@@ -1862,7 +1866,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
 	if(adapter->detect_tx_hung) {
 		/* detect a transmit hang in hardware, this serializes the
 		 * check with the clearing of time_stamp and movement of i */
-		adapter->detect_tx_hung = FALSE;
+		adapter->detect_tx_hung = false;
 		if (tx_ring->buffer_info[eop].dma &&
 		   time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
 		   && !(IXGB_READ_REG(&adapter->hw, STATUS) &
@@ -1932,7 +1936,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
  * @adapter: board private structure
  **/
 
-static boolean_t
+static bool
 #ifdef CONFIG_IXGB_NAPI
 ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
 #else
@@ -1944,9 +1948,9 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 	struct pci_dev *pdev = adapter->pdev;
 	struct ixgb_rx_desc *rx_desc, *next_rxd;
 	struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
-	uint32_t length;
+	u32 length;
 	unsigned int i, j;
-	boolean_t cleaned = FALSE;
+	bool cleaned = false;
 
 	i = rx_ring->next_to_clean;
 	rx_desc = IXGB_RX_DESC(*rx_ring, i);
@@ -1980,7 +1984,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 		next_skb = next_buffer->skb;
 		prefetch(next_skb);
 
-		cleaned = TRUE;
+		cleaned = true;
 
 		pci_unmap_single(pdev,
 				 buffer_info->dma,
@@ -2162,7 +2166,7 @@ static void
 ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 {
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
-	uint32_t ctrl, rctl;
+	u32 ctrl, rctl;
 
 	ixgb_irq_disable(adapter);
 	adapter->vlgrp = grp;
@@ -2193,14 +2197,16 @@ ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 		IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
 	}
 
-	ixgb_irq_enable(adapter);
+	/* don't enable interrupts unless we are UP */
+	if (adapter->netdev->flags & IFF_UP)
+		ixgb_irq_enable(adapter);
 }
 
 static void
-ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
+ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
-	uint32_t vfta, index;
+	u32 vfta, index;
 
 	/* add VID to filter table */
 
@@ -2211,18 +2217,20 @@ ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
 }
 
 static void
-ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
+ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
-	uint32_t vfta, index;
+	u32 vfta, index;
 
 	ixgb_irq_disable(adapter);
 
 	vlan_group_set_device(adapter->vlgrp, vid, NULL);
 
-	ixgb_irq_enable(adapter);
+	/* don't enable interrupts unless we are UP */
+	if (adapter->netdev->flags & IFF_UP)
+		ixgb_irq_enable(adapter);
 
-	/* remove VID from filter table*/
+	/* remove VID from filter table */
 
 	index = (vid >> 5) & 0x7F;
 	vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
@@ -2236,7 +2244,7 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
 	ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
 
 	if(adapter->vlgrp) {
-		uint16_t vid;
+		u16 vid;
 		for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
 			if(!vlan_group_get_device(adapter->vlgrp, vid))
 				continue;
@@ -2277,7 +2285,7 @@ static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
 
 	if(netif_running(netdev))
-		ixgb_down(adapter, TRUE);
+		ixgb_down(adapter, true);
 
 	pci_disable_device(pdev);
 

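A hedged sketch of the shutdown model the ixgb_main.c changes above move to: the old atomic irq_sem counter is gone, and a single __IXGB_DOWN bit in adapter->flags now tells the interrupt and watchdog paths to stand down. The helper below is illustrative only; the names are taken from the diff and the ordering mirrors ixgb_down()/ixgb_up().

static void ixgb_example_restart(struct ixgb_adapter *adapter)
{
	/* stop the irq handler from re-arming the watchdog timer */
	set_bit(__IXGB_DOWN, &adapter->flags);
	ixgb_irq_disable(adapter);

	/* ... reconfigure rings here ... */

	clear_bit(__IXGB_DOWN, &adapter->flags);
	ixgb_irq_enable(adapter);
	mod_timer(&adapter->watchdog_timer, jiffies);
}
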
+ 0 - 7
drivers/net/ixgb/ixgb_osdep.h

@@ -39,13 +39,6 @@
 #include <linux/interrupt.h>
 #include <linux/sched.h>
 
-typedef enum {
-#undef FALSE
-	FALSE = 0,
-#undef TRUE
-	TRUE = 1
-} boolean_t;
-
 #undef ASSERT
 #define ASSERT(x)	if(!(x)) BUG()
 #define MSGOUT(S, A, B)	printk(KERN_DEBUG S "\n", A, B)

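For illustration only: with the private boolean_t enum dropped from ixgb_osdep.h, the driver relies on the kernel's native bool from <linux/types.h>. The function below is hypothetical and only shows the replacement spelling.

#include <linux/types.h>

static bool ixgb_example_is_jumbo(u32 frame_size, u32 std_max)
{
	/* bool/true/false replace the old boolean_t/TRUE/FALSE */
	return frame_size > std_max;
}
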
+ 76 - 11
drivers/net/ixgbe/ixgbe.h

@@ -36,6 +36,9 @@
 #include "ixgbe_type.h"
 #include "ixgbe_common.h"
 
+#ifdef CONFIG_DCA
+#include <linux/dca.h>
+#endif
 
 #define IXGBE_ERR(args...) printk(KERN_ERR "ixgbe: " args)
 
@@ -120,7 +123,6 @@ struct ixgbe_queue_stats {
 };
 
 struct ixgbe_ring {
-	struct ixgbe_adapter *adapter;	/* backlink */
 	void *desc;			/* descriptor ring memory */
 	dma_addr_t dma;			/* phys. address of descriptor ring */
 	unsigned int size;		/* length in bytes */
@@ -128,6 +130,7 @@ struct ixgbe_ring {
 	unsigned int next_to_use;
 	unsigned int next_to_clean;
 
+	int queue_index; /* needed for multiqueue queue management */
 	union {
 		struct ixgbe_tx_buffer *tx_buffer_info;
 		struct ixgbe_rx_buffer *rx_buffer_info;
@@ -136,8 +139,21 @@ struct ixgbe_ring {
 	u16 head;
 	u16 tail;
 
+	unsigned int total_bytes;
+	unsigned int total_packets;
 
+	u16 reg_idx; /* holds the special value that gets the hardware register
+		      * offset associated with this ring, which is different
+		      * for DCE and RSS modes */
+
+#ifdef CONFIG_DCA
+	/* cpu for tx queue */
+	int cpu;
+#endif
 	struct ixgbe_queue_stats stats;
+	u8 v_idx; /* maps directly to the index for this ring in the hardware
+		   * vector array, can also be used for finding the bit in EICR
+		   * and friends that represents the vector for this ring */
 
 	u32 eims_value;
 	u16 itr_register;
@@ -146,6 +162,33 @@ struct ixgbe_ring {
 	u16 work_limit;                /* max work per interrupt */
 };
 
+#define RING_F_VMDQ 1
+#define RING_F_RSS  2
+#define IXGBE_MAX_RSS_INDICES  16
+#define IXGBE_MAX_VMDQ_INDICES 16
+struct ixgbe_ring_feature {
+	int indices;
+	int mask;
+};
+
+#define MAX_RX_QUEUES 64
+#define MAX_TX_QUEUES 32
+
+/* MAX_MSIX_Q_VECTORS of these are allocated,
+ * but we only use one per queue-specific vector.
+ */
+struct ixgbe_q_vector {
+	struct ixgbe_adapter *adapter;
+	struct napi_struct napi;
+	DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
+	DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
+	u8 rxr_count;     /* Rx ring count assigned to this vector */
+	u8 txr_count;     /* Tx ring count assigned to this vector */
+	u8 tx_eitr;
+	u8 rx_eitr;
+	u32 eitr;
+};
+
 /* Helper macros to switch between ints/sec and what the register uses.
  * And yes, it's the same math going both ways.
  */
@@ -166,6 +209,14 @@ struct ixgbe_ring {
 
 #define IXGBE_MAX_JUMBO_FRAME_SIZE        16128
 
+#define OTHER_VECTOR 1
+#define NON_Q_VECTORS (OTHER_VECTOR)
+
+#define MAX_MSIX_Q_VECTORS 16
+#define MIN_MSIX_Q_VECTORS 2
+#define MAX_MSIX_COUNT (MAX_MSIX_Q_VECTORS + NON_Q_VECTORS)
+#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
+
 /* board specific private data structure */
 struct ixgbe_adapter {
 	struct timer_list watchdog_timer;
@@ -173,10 +224,16 @@ struct ixgbe_adapter {
 	u16 bd_number;
 	u16 rx_buf_len;
 	struct work_struct reset_task;
+	struct ixgbe_q_vector q_vector[MAX_MSIX_Q_VECTORS];
+	char name[MAX_MSIX_COUNT][IFNAMSIZ + 5];
+
+	/* Interrupt Throttle Rate */
+	u32 itr_setting;
+	u16 eitr_low;
+	u16 eitr_high;
 
 	/* TX */
 	struct ixgbe_ring *tx_ring;	/* One per active queue */
-	struct napi_struct napi;
 	u64 restart_queue;
 	u64 lsc_int;
 	u64 hw_tso_ctxt;
@@ -192,22 +249,27 @@ struct ixgbe_adapter {
 	u64 non_eop_descs;
 	int num_tx_queues;
 	int num_rx_queues;
+	int num_msix_vectors;
+	struct ixgbe_ring_feature ring_feature[3];
 	struct msix_entry *msix_entries;
 
 	u64 rx_hdr_split;
 	u32 alloc_rx_page_failed;
 	u32 alloc_rx_buff_failed;
 
+	/* Some features need tri-state capability,
+	 * thus the additional *_CAPABLE flags.
+	 */
 	u32 flags;
-#define IXGBE_FLAG_RX_CSUM_ENABLED              (u32)(1)
+#define IXGBE_FLAG_RX_CSUM_ENABLED              (u32)(1 << 0)
 #define IXGBE_FLAG_MSI_ENABLED                  (u32)(1 << 1)
-#define IXGBE_FLAG_MSIX_ENABLED			(u32)(1 << 2)
-#define IXGBE_FLAG_RX_PS_ENABLED		(u32)(1 << 3)
-#define IXGBE_FLAG_IN_NETPOLL			(u32)(1 << 4)
-
-	/* Interrupt Throttle Rate */
-	u32 rx_eitr;
-	u32 tx_eitr;
+#define IXGBE_FLAG_MSIX_ENABLED                 (u32)(1 << 2)
+#define IXGBE_FLAG_RX_PS_ENABLED                (u32)(1 << 3)
+#define IXGBE_FLAG_IN_NETPOLL                   (u32)(1 << 4)
+#define IXGBE_FLAG_IMIR_ENABLED                 (u32)(1 << 5)
+#define IXGBE_FLAG_RSS_ENABLED                  (u32)(1 << 6)
+#define IXGBE_FLAG_VMDQ_ENABLED                 (u32)(1 << 7)
+#define IXGBE_FLAG_DCA_ENABLED                  (u32)(1 << 8)
 
 	/* OS defined structs */
 	struct net_device *netdev;
@@ -218,7 +280,10 @@ struct ixgbe_adapter {
 	struct ixgbe_hw hw;
 	u16 msg_enable;
 	struct ixgbe_hw_stats stats;
-	char lsc_name[IFNAMSIZ + 5];
+
+	/* Interrupt Throttle Rate */
+	u32 rx_eitr;
+	u32 tx_eitr;
 
 	unsigned long state;
 	u64 tx_busy;

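A hedged sketch of how the per-vector ring bitmaps added to struct ixgbe_q_vector above are meant to be walked; it mirrors the find_first_bit()/find_next_bit() loops that appear later in the ixgbe_main.c hunks. The structure and field names come from this diff; the function itself is hypothetical.

static void ixgbe_example_walk_rx_rings(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	int i, r_idx;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		struct ixgbe_ring *rxr = &adapter->rx_ring[r_idx];

		/* per-ring work goes here; reset the ITR byte/packet totals */
		rxr->total_bytes = 0;
		rxr->total_packets = 0;

		r_idx = find_next_bit(q_vector->rxr_idx,
				      adapter->num_rx_queues, r_idx + 1);
	}
}
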
+ 28 - 11
drivers/net/ixgbe/ixgbe_ethtool.c

@@ -246,13 +246,26 @@ static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
 
 static int ixgbe_set_tso(struct net_device *netdev, u32 data)
 {
-
 	if (data) {
 		netdev->features |= NETIF_F_TSO;
 		netdev->features |= NETIF_F_TSO6;
 	} else {
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+		struct ixgbe_adapter *adapter = netdev_priv(netdev);
+		int i;
+#endif
+		netif_stop_queue(netdev);
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			netif_stop_subqueue(netdev, i);
+#endif
 		netdev->features &= ~NETIF_F_TSO;
 		netdev->features &= ~NETIF_F_TSO6;
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			netif_start_subqueue(netdev, i);
+#endif
+		netif_start_queue(netdev);
 	}
 	return 0;
 }
@@ -873,13 +886,13 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-	if (adapter->rx_eitr == 0)
-		ec->rx_coalesce_usecs = 0;
+	if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
+		ec->rx_coalesce_usecs = adapter->rx_eitr;
 	else
 		ec->rx_coalesce_usecs = 1000000 / adapter->rx_eitr;
 
-	if (adapter->tx_eitr == 0)
-		ec->tx_coalesce_usecs = 0;
+	if (adapter->tx_eitr < IXGBE_MIN_ITR_USECS)
+		ec->tx_coalesce_usecs = adapter->tx_eitr;
 	else
 		ec->tx_coalesce_usecs = 1000000 / adapter->tx_eitr;
 
@@ -893,22 +906,26 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
 	if ((ec->rx_coalesce_usecs > IXGBE_MAX_ITR_USECS) ||
-	    ((ec->rx_coalesce_usecs > 0) &&
+	    ((ec->rx_coalesce_usecs != 0) &&
+	     (ec->rx_coalesce_usecs != 1) &&
+	     (ec->rx_coalesce_usecs != 3) &&
 	     (ec->rx_coalesce_usecs < IXGBE_MIN_ITR_USECS)))
 		return -EINVAL;
 	if ((ec->tx_coalesce_usecs > IXGBE_MAX_ITR_USECS) ||
-	    ((ec->tx_coalesce_usecs > 0) &&
+	    ((ec->tx_coalesce_usecs != 0) &&
+	     (ec->tx_coalesce_usecs != 1) &&
+	     (ec->tx_coalesce_usecs != 3) &&
 	     (ec->tx_coalesce_usecs < IXGBE_MIN_ITR_USECS)))
 		return -EINVAL;
 
 	/* convert to rate of irq's per second */
-	if (ec->rx_coalesce_usecs == 0)
-		adapter->rx_eitr = 0;
+	if (ec->rx_coalesce_usecs < IXGBE_MIN_ITR_USECS)
+		adapter->rx_eitr = ec->rx_coalesce_usecs;
 	else
 		adapter->rx_eitr = (1000000 / ec->rx_coalesce_usecs);
 
-	if (ec->tx_coalesce_usecs == 0)
-		adapter->tx_eitr = 0;
+	if (ec->tx_coalesce_usecs < IXGBE_MIN_ITR_USECS)
+		adapter->tx_eitr = ec->tx_coalesce_usecs;
 	else
 		adapter->tx_eitr = (1000000 / ec->tx_coalesce_usecs);
 

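A hedged sketch of the coalescing convention ixgbe_set_coalesce above enforces: user values of 0, 1, or 3 (all below IXGBE_MIN_ITR_USECS) are stored verbatim as mode selectors, while anything larger is converted from microseconds per interrupt to interrupts per second. The constants and adapter fields are assumed from the driver headers; the helper name is hypothetical.

static int ixgbe_example_store_rx_eitr(struct ixgbe_adapter *adapter, u32 usecs)
{
	/* same validation as the ethtool hook: reject out-of-range values */
	if (usecs > IXGBE_MAX_ITR_USECS ||
	    (usecs != 0 && usecs != 1 && usecs != 3 &&
	     usecs < IXGBE_MIN_ITR_USECS))
		return -EINVAL;

	if (usecs < IXGBE_MIN_ITR_USECS)
		adapter->rx_eitr = usecs;		/* special mode value    */
	else
		adapter->rx_eitr = 1000000 / usecs;	/* interrupts per second */

	return 0;
}
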
+ 1176 - 344
drivers/net/ixgbe/ixgbe_main.c

@@ -48,7 +48,7 @@ char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
 	"Intel(R) 10 Gigabit PCI Express Network Driver";
 
-#define DRV_VERSION "1.1.18"
+#define DRV_VERSION "1.3.18-k2"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static const char ixgbe_copyright[] =
 	 "Copyright (c) 1999-2007 Intel Corporation.";
@@ -80,6 +80,16 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
 };
 MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
 
+#ifdef CONFIG_DCA
+static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
+			    void *p);
+static struct notifier_block dca_notifier = {
+	.notifier_call = ixgbe_notify_dca,
+	.next          = NULL,
+	.priority      = 0
+};
+#endif
+
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
 MODULE_LICENSE("GPL");
@@ -256,26 +266,125 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
 		 * sees the new next_to_clean.
 		 */
 		smp_mb();
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
+		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
+			netif_wake_subqueue(netdev, tx_ring->queue_index);
+			adapter->restart_queue++;
+		}
+#else
 		if (netif_queue_stopped(netdev) &&
 		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
 			netif_wake_queue(netdev);
 			adapter->restart_queue++;
 		}
+#endif
 	}
 
 	if (adapter->detect_tx_hung)
 		if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc))
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+			netif_stop_subqueue(netdev, tx_ring->queue_index);
+#else
 			netif_stop_queue(netdev);
+#endif
 
 	if (total_tx_packets >= tx_ring->work_limit)
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value);
 
+	tx_ring->total_bytes += total_tx_bytes;
+	tx_ring->total_packets += total_tx_packets;
 	adapter->net_stats.tx_bytes += total_tx_bytes;
 	adapter->net_stats.tx_packets += total_tx_packets;
 	cleaned = total_tx_packets ? true : false;
 	return cleaned;
 }
 
+#ifdef CONFIG_DCA
+static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
+				struct ixgbe_ring *rxr)
+{
+	u32 rxctrl;
+	int cpu = get_cpu();
+	int q = rxr - adapter->rx_ring;
+
+	if (rxr->cpu != cpu) {
+		rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
+		rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
+		rxctrl |= dca_get_tag(cpu);
+		rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
+		rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
+		rxr->cpu = cpu;
+	}
+	put_cpu();
+}
+
+static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
+				struct ixgbe_ring *txr)
+{
+	u32 txctrl;
+	int cpu = get_cpu();
+	int q = txr - adapter->tx_ring;
+
+	if (txr->cpu != cpu) {
+		txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
+		txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
+		txctrl |= dca_get_tag(cpu);
+		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
+		txr->cpu = cpu;
+	}
+	put_cpu();
+}
+
+static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
+{
+	int i;
+
+	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
+		return;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		adapter->tx_ring[i].cpu = -1;
+		ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
+	}
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		adapter->rx_ring[i].cpu = -1;
+		ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
+	}
+}
+
+static int __ixgbe_notify_dca(struct device *dev, void *data)
+{
+	struct net_device *netdev = dev_get_drvdata(dev);
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	unsigned long event = *(unsigned long *)data;
+
+	switch (event) {
+	case DCA_PROVIDER_ADD:
+		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
+		/* Always use CB2 mode, difference is masked
+		 * in the CB driver. */
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
+		if (dca_add_requester(dev) == 0) {
+			ixgbe_setup_dca(adapter);
+			break;
+		}
+		/* Fall Through since DCA is disabled. */
+	case DCA_PROVIDER_REMOVE:
+		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
+			dca_remove_requester(dev);
+			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
+			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
+		}
+		break;
+	}
+
+	return 0;
+}
+
+#endif /* CONFIG_DCA */
 /**
  * ixgbe_receive_skb - Send a completed packet up the stack
  * @adapter: board private structure
@@ -556,10 +665,15 @@ next_desc:
 	adapter->net_stats.rx_bytes += total_rx_bytes;
 	adapter->net_stats.rx_packets += total_rx_packets;
 
+	rx_ring->total_packets += total_rx_packets;
+	rx_ring->total_bytes += total_rx_bytes;
+
 	return cleaned;
 }
 
-#define IXGBE_MAX_INTR 10
+static int ixgbe_clean_rxonly(struct napi_struct *, int);
 /**
  * ixgbe_configure_msix - Configure MSI-X hardware
  * @adapter: board private structure
@@ -569,28 +683,195 @@ next_desc:
  **/
 static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 {
-	int i, vector = 0;
+	struct ixgbe_q_vector *q_vector;
+	int i, j, q_vectors, v_idx, r_idx;
+	u32 mask;
 
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(i),
-			       IXGBE_MSIX_VECTOR(vector));
-		writel(EITR_INTS_PER_SEC_TO_REG(adapter->tx_eitr),
-		       adapter->hw.hw_addr + adapter->tx_ring[i].itr_register);
-		vector++;
-	}
+	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(i),
-			       IXGBE_MSIX_VECTOR(vector));
-		writel(EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr),
-		       adapter->hw.hw_addr + adapter->rx_ring[i].itr_register);
-		vector++;
+	/* Populate the IVAR table and set the ITR values to the
+	 * corresponding register.
+	 */
+	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+		q_vector = &adapter->q_vector[v_idx];
+		/* XXX for_each_bit(...) */
+		r_idx = find_first_bit(q_vector->rxr_idx,
+				      adapter->num_rx_queues);
+
+		for (i = 0; i < q_vector->rxr_count; i++) {
+			j = adapter->rx_ring[r_idx].reg_idx;
+			ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx);
+			r_idx = find_next_bit(q_vector->rxr_idx,
+					      adapter->num_rx_queues,
+					      r_idx + 1);
+		}
+		r_idx = find_first_bit(q_vector->txr_idx,
+				       adapter->num_tx_queues);
+
+		for (i = 0; i < q_vector->txr_count; i++) {
+			j = adapter->tx_ring[r_idx].reg_idx;
+			ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx);
+			r_idx = find_next_bit(q_vector->txr_idx,
+					      adapter->num_tx_queues,
+					      r_idx + 1);
+		}
+
+		/* if this is a tx only vector use half the irq (tx) rate */
+		if (q_vector->txr_count && !q_vector->rxr_count)
+			q_vector->eitr = adapter->tx_eitr;
+		else
+			/* rx only or mixed */
+			q_vector->eitr = adapter->rx_eitr;
+
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
+				EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
 	}
 
-	vector = adapter->num_tx_queues + adapter->num_rx_queues;
-	ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX,
-		       IXGBE_MSIX_VECTOR(vector));
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(vector), 1950);
+	ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx);
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
+
+	/* set up to autoclear timer, lsc, and the vectors */
+	mask = IXGBE_EIMS_ENABLE_MASK;
+	mask &= ~IXGBE_EIMS_OTHER;
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
+}
+
+enum latency_range {
+	lowest_latency = 0,
+	low_latency = 1,
+	bulk_latency = 2,
+	latency_invalid = 255
+};
+
+/**
+ * ixgbe_update_itr - update the dynamic ITR value based on statistics
+ * @adapter: pointer to adapter
+ * @eitr: eitr setting (ints per sec) to give last timeslice
+ * @itr_setting: current throttle rate in ints/second
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ *
+ *      Stores a new ITR value based on packets and byte
+ *      counts during the last interrupt.  The advantage of per interrupt
+ *      computation is faster updates and more accurate ITR for the current
+ *      traffic pattern.  Constants in this function were computed
+ *      based on theoretical maximum wire speed and thresholds were set based
+ *      on testing data as well as attempting to minimize response time
+ *      while increasing bulk throughput.
+ *      This functionality is controlled by the InterruptThrottleRate module
+ *      parameter (see ixgbe_param.c).
+ **/
+static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
+			   u32 eitr, u8 itr_setting,
+			   int packets, int bytes)
+{
+	unsigned int retval = itr_setting;
+	u32 timepassed_us;
+	u64 bytes_perint;
+
+	if (packets == 0)
+		goto update_itr_done;
+
+
+	/* simple throttlerate management
+	 *    0-20MB/s lowest (100000 ints/s)
+	 *   20-100MB/s low   (20000 ints/s)
+	 *  100-1249MB/s bulk (8000 ints/s)
+	 */
+	/* what was last interrupt timeslice? */
+	timepassed_us = 1000000/eitr;
+	bytes_perint = bytes / timepassed_us; /* bytes/usec */
+
+	switch (itr_setting) {
+	case lowest_latency:
+		if (bytes_perint > adapter->eitr_low)
+			retval = low_latency;
+		break;
+	case low_latency:
+		if (bytes_perint > adapter->eitr_high)
+			retval = bulk_latency;
+		else if (bytes_perint <= adapter->eitr_low)
+			retval = lowest_latency;
+		break;
+	case bulk_latency:
+		if (bytes_perint <= adapter->eitr_high)
+			retval = low_latency;
+		break;
+	}
+
+update_itr_done:
+	return retval;
+}
+
+static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
+{
+	struct ixgbe_adapter *adapter = q_vector->adapter;
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 new_itr;
+	u8 current_itr, ret_itr;
+	int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
+			      sizeof(struct ixgbe_q_vector);
+	struct ixgbe_ring *rx_ring, *tx_ring;
+
+	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+	for (i = 0; i < q_vector->txr_count; i++) {
+		tx_ring = &(adapter->tx_ring[r_idx]);
+		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
+					   q_vector->tx_eitr,
+					   tx_ring->total_packets,
+					   tx_ring->total_bytes);
+		/* if the result for this queue would decrease interrupt
+		 * rate for this vector then use that result */
+		q_vector->tx_eitr = ((q_vector->tx_eitr > ret_itr) ?
+				    q_vector->tx_eitr - 1 : ret_itr);
+		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+				      r_idx + 1);
+	}
+
+	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+	for (i = 0; i < q_vector->rxr_count; i++) {
+		rx_ring = &(adapter->rx_ring[r_idx]);
+		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
+					   q_vector->rx_eitr,
+					   rx_ring->total_packets,
+					   rx_ring->total_bytes);
+		/* if the result for this queue would decrease interrupt
+		 * rate for this vector then use that result */
+		q_vector->rx_eitr = ((q_vector->rx_eitr > ret_itr) ?
+				    q_vector->rx_eitr - 1 : ret_itr);
+		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+				      r_idx + 1);
+	}
+
+	current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr);
+
+	switch (current_itr) {
+	/* counts and packets in update_itr are dependent on these numbers */
+	case lowest_latency:
+		new_itr = 100000;
+		break;
+	case low_latency:
+		new_itr = 20000; /* aka hwitr = ~200 */
+		break;
+	case bulk_latency:
+	default:
+		new_itr = 8000;
+		break;
+	}
+
+	if (new_itr != q_vector->eitr) {
+		u32 itr_reg;
+		/* do an exponential smoothing */
+		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
+		q_vector->eitr = new_itr;
+		itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
+		/* must write high and low 16 bits to reset counter */
+		DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx,
+			itr_reg);
+		IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16);
+	}
+
+	return;
 }
 
 static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
@@ -614,153 +895,302 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
 
 static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
 {
-	struct ixgbe_ring *txr = data;
-	struct ixgbe_adapter *adapter = txr->adapter;
+	struct ixgbe_q_vector *q_vector = data;
+	struct ixgbe_adapter  *adapter = q_vector->adapter;
+	struct ixgbe_ring     *txr;
+	int i, r_idx;
 
-	ixgbe_clean_tx_irq(adapter, txr);
+	if (!q_vector->txr_count)
+		return IRQ_HANDLED;
+
+	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+	for (i = 0; i < q_vector->txr_count; i++) {
+		txr = &(adapter->tx_ring[r_idx]);
+#ifdef CONFIG_DCA
+		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+			ixgbe_update_tx_dca(adapter, txr);
+#endif
+		txr->total_bytes = 0;
+		txr->total_packets = 0;
+		ixgbe_clean_tx_irq(adapter, txr);
+		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+				      r_idx + 1);
+	}
 
 	return IRQ_HANDLED;
 }
 
+/**
+ * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
+ * @irq: unused
+ * @data: pointer to our q_vector struct for this interrupt vector
+ **/
 static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
 {
-	struct ixgbe_ring *rxr = data;
-	struct ixgbe_adapter *adapter = rxr->adapter;
+	struct ixgbe_q_vector *q_vector = data;
+	struct ixgbe_adapter  *adapter = q_vector->adapter;
+	struct ixgbe_ring  *rxr;
+	int r_idx;
+
+	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+	if (!q_vector->rxr_count)
+		return IRQ_HANDLED;
+
+	rxr = &(adapter->rx_ring[r_idx]);
+	/* disable interrupts on this vector only */
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->v_idx);
+	rxr->total_bytes = 0;
+	rxr->total_packets = 0;
+	netif_rx_schedule(adapter->netdev, &q_vector->napi);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
+{
+	ixgbe_msix_clean_rx(irq, data);
+	ixgbe_msix_clean_tx(irq, data);
 
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->eims_value);
-	netif_rx_schedule(adapter->netdev, &adapter->napi);
 	return IRQ_HANDLED;
 }
 
+/**
+ * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ **/
 static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
 {
-	struct ixgbe_adapter *adapter = container_of(napi,
-					struct ixgbe_adapter, napi);
-	struct net_device *netdev = adapter->netdev;
+	struct ixgbe_q_vector *q_vector =
+			       container_of(napi, struct ixgbe_q_vector, napi);
+	struct ixgbe_adapter *adapter = q_vector->adapter;
+	struct ixgbe_ring *rxr;
 	int work_done = 0;
-	struct ixgbe_ring *rxr = adapter->rx_ring;
+	long r_idx;
 
-	/* Keep link state information with original netdev */
-	if (!netif_carrier_ok(netdev))
-		goto quit_polling;
+	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+	rxr = &(adapter->rx_ring[r_idx]);
+#ifdef CONFIG_DCA
+	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+		ixgbe_update_rx_dca(adapter, rxr);
+#endif
 
 	ixgbe_clean_rx_irq(adapter, rxr, &work_done, budget);
 
-	/* If no Tx and not enough Rx work done, exit the polling mode */
-	if ((work_done < budget) || !netif_running(netdev)) {
-quit_polling:
-		netif_rx_complete(netdev, napi);
+	/* If all Rx work done, exit the polling mode */
+	if (work_done < budget) {
+		netif_rx_complete(adapter->netdev, napi);
+		if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
+			ixgbe_set_itr_msix(q_vector);
 		if (!test_bit(__IXGBE_DOWN, &adapter->state))
-			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS,
-					rxr->eims_value);
+			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->v_idx);
 	}
 
 	return work_done;
 }
 
+static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
+				     int r_idx)
+{
+	a->q_vector[v_idx].adapter = a;
+	set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
+	a->q_vector[v_idx].rxr_count++;
+	a->rx_ring[r_idx].v_idx = 1 << v_idx;
+}
+
+static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
+				     int r_idx)
+{
+	a->q_vector[v_idx].adapter = a;
+	set_bit(r_idx, a->q_vector[v_idx].txr_idx);
+	a->q_vector[v_idx].txr_count++;
+	a->tx_ring[r_idx].v_idx = 1 << v_idx;
+}
+
 /**
- * ixgbe_setup_msix - Initialize MSI-X interrupts
+ * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
+ * @adapter: board private structure to initialize
+ * @vectors: allotted vector count for descriptor rings
  *
- * ixgbe_setup_msix allocates MSI-X vectors and requests
- * interrutps from the kernel.
+ * This function maps descriptor rings to the queue-specific vectors
+ * we were allotted through the MSI-X enabling code.  Ideally, we'd have
+ * one vector per ring/queue, but on a constrained vector budget, we
+ * group the rings as "efficiently" as possible.  You would add new
+ * mapping configurations in here.
  **/
-static int ixgbe_setup_msix(struct ixgbe_adapter *adapter)
-{
-	struct net_device *netdev = adapter->netdev;
-	int i, int_vector = 0, err = 0;
-	int max_msix_count;
+static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
+				      int vectors)
+{
+	int v_start = 0;
+	int rxr_idx = 0, txr_idx = 0;
+	int rxr_remaining = adapter->num_rx_queues;
+	int txr_remaining = adapter->num_tx_queues;
+	int i, j;
+	int rqpv, tqpv;
+	int err = 0;
+
+	/* No mapping required if MSI-X is disabled. */
+	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+		goto out;
 
-	/* +1 for the LSC interrupt */
-	max_msix_count = adapter->num_rx_queues + adapter->num_tx_queues + 1;
-	adapter->msix_entries = kcalloc(max_msix_count,
-					sizeof(struct msix_entry), GFP_KERNEL);
-	if (!adapter->msix_entries)
-		return -ENOMEM;
+	/*
+	 * The ideal configuration...
+	 * We have enough vectors to map one per queue.
+	 */
+	if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
+		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
+			map_vector_to_rxq(adapter, v_start, rxr_idx);
 
-	for (i = 0; i < max_msix_count; i++)
-		adapter->msix_entries[i].entry = i;
+		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
+			map_vector_to_txq(adapter, v_start, txr_idx);
 
-	err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
-			      max_msix_count);
-	if (err)
 		goto out;
+	}
 
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		sprintf(adapter->tx_ring[i].name, "%s-tx%d", netdev->name, i);
-		err = request_irq(adapter->msix_entries[int_vector].vector,
-				  &ixgbe_msix_clean_tx,
-				  0,
-				  adapter->tx_ring[i].name,
-				  &(adapter->tx_ring[i]));
-		if (err) {
-			DPRINTK(PROBE, ERR,
-				"request_irq failed for MSIX interrupt "
-				"Error: %d\n", err);
-			goto release_irqs;
+	/*
+	 * If we don't have enough vectors for a 1-to-1
+	 * mapping, we'll have to group them so there are
+	 * multiple queues per vector.
+	 */
+	/* Re-adjusting *qpv takes care of the remainder. */
+	for (i = v_start; i < vectors; i++) {
+		rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
+		for (j = 0; j < rqpv; j++) {
+			map_vector_to_rxq(adapter, i, rxr_idx);
+			rxr_idx++;
+			rxr_remaining--;
+		}
+	}
+	for (i = v_start; i < vectors; i++) {
+		tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
+		for (j = 0; j < tqpv; j++) {
+			map_vector_to_txq(adapter, i, txr_idx);
+			txr_idx++;
+			txr_remaining--;
 		}
-		adapter->tx_ring[i].eims_value =
-		    (1 << IXGBE_MSIX_VECTOR(int_vector));
-		adapter->tx_ring[i].itr_register = IXGBE_EITR(int_vector);
-		int_vector++;
 	}
 
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		if (strlen(netdev->name) < (IFNAMSIZ - 5))
-			sprintf(adapter->rx_ring[i].name,
-				"%s-rx%d", netdev->name, i);
-		else
-			memcpy(adapter->rx_ring[i].name,
-			       netdev->name, IFNAMSIZ);
-		err = request_irq(adapter->msix_entries[int_vector].vector,
-				  &ixgbe_msix_clean_rx, 0,
-				  adapter->rx_ring[i].name,
-				  &(adapter->rx_ring[i]));
+out:
+	return err;
+}
+
+/**
+ * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
+ * interrupts from the kernel.
+ **/
+static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	irqreturn_t (*handler)(int, void *);
+	int i, vector, q_vectors, err;
+
+	/* Decrement for Other and TCP Timer vectors */
+	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+	/* Map the Tx/Rx rings to the vectors we were allotted. */
+	err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
+	if (err)
+		goto out;
+
+#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
+			 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
+			 &ixgbe_msix_clean_many)
+	for (vector = 0; vector < q_vectors; vector++) {
+		handler = SET_HANDLER(&adapter->q_vector[vector]);
+		sprintf(adapter->name[vector], "%s:v%d-%s",
+			netdev->name, vector,
+			(handler == &ixgbe_msix_clean_rx) ? "Rx" :
+			 ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx"));
+		err = request_irq(adapter->msix_entries[vector].vector,
+				  handler, 0, adapter->name[vector],
+				  &(adapter->q_vector[vector]));
 		if (err) {
 			DPRINTK(PROBE, ERR,
 				"request_irq failed for MSIX interrupt "
 				"Error: %d\n", err);
-			goto release_irqs;
+			goto free_queue_irqs;
 		}
-
-		adapter->rx_ring[i].eims_value =
-		    (1 << IXGBE_MSIX_VECTOR(int_vector));
-		adapter->rx_ring[i].itr_register = IXGBE_EITR(int_vector);
-		int_vector++;
 	}
 
-	sprintf(adapter->lsc_name, "%s-lsc", netdev->name);
-	err = request_irq(adapter->msix_entries[int_vector].vector,
-			  &ixgbe_msix_lsc, 0, adapter->lsc_name, netdev);
+	sprintf(adapter->name[vector], "%s:lsc", netdev->name);
+	err = request_irq(adapter->msix_entries[vector].vector,
+			  &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
 	if (err) {
 		DPRINTK(PROBE, ERR,
 			"request_irq for msix_lsc failed: %d\n", err);
-		goto release_irqs;
+		goto free_queue_irqs;
 	}
 
-	/* FIXME: implement netif_napi_remove() instead */
-	adapter->napi.poll = ixgbe_clean_rxonly;
-	adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;
 	return 0;
 
-release_irqs:
-	int_vector--;
-	for (; int_vector >= adapter->num_tx_queues; int_vector--)
-		free_irq(adapter->msix_entries[int_vector].vector,
-			 &(adapter->rx_ring[int_vector -
-					    adapter->num_tx_queues]));
-
-	for (; int_vector >= 0; int_vector--)
-		free_irq(adapter->msix_entries[int_vector].vector,
-			 &(adapter->tx_ring[int_vector]));
-out:
+free_queue_irqs:
+	for (i = vector - 1; i >= 0; i--)
+		free_irq(adapter->msix_entries[--vector].vector,
+			 &(adapter->q_vector[i]));
+	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
+	pci_disable_msix(adapter->pdev);
 	kfree(adapter->msix_entries);
 	adapter->msix_entries = NULL;
-	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
+out:
 	return err;
 }
 
+static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct ixgbe_q_vector *q_vector = adapter->q_vector;
+	u8 current_itr;
+	u32 new_itr = q_vector->eitr;
+	struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
+	struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];
+
+	q_vector->tx_eitr = ixgbe_update_itr(adapter, new_itr,
+					     q_vector->tx_eitr,
+					     tx_ring->total_packets,
+					     tx_ring->total_bytes);
+	q_vector->rx_eitr = ixgbe_update_itr(adapter, new_itr,
+					     q_vector->rx_eitr,
+					     rx_ring->total_packets,
+					     rx_ring->total_bytes);
+
+	current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr);
+
+	switch (current_itr) {
+	/* counts and packets in update_itr are dependent on these numbers */
+	case lowest_latency:
+		new_itr = 100000;
+		break;
+	case low_latency:
+		new_itr = 20000; /* aka hwitr = ~200 */
+		break;
+	case bulk_latency:
+		new_itr = 8000;
+		break;
+	default:
+		break;
+	}
+
+	if (new_itr != q_vector->eitr) {
+		u32 itr_reg;
+		/* do an exponential smoothing */
+		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
+		q_vector->eitr = new_itr;
+		itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
+		/* must write high and low 16 bits to reset counter */
+		IXGBE_WRITE_REG(hw, IXGBE_EITR(0), itr_reg | (itr_reg)<<16);
+	}
+
+	return;
+}
+
+static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter);
+
 /**
- * ixgbe_intr - Interrupt Handler
+ * ixgbe_intr - legacy mode Interrupt Handler
  * @irq: interrupt number
  * @data: pointer to a network interface device structure
  * @pt_regs: CPU registers structure
@@ -772,8 +1202,10 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 eicr;
 
-	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
 
+	/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read,
+	 * therefore no explicit interrupt disable is necessary */
+	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
 	if (!eicr)
 		return IRQ_NONE;	/* Not our interrupt */
 
@@ -782,16 +1214,33 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
 		if (!test_bit(__IXGBE_DOWN, &adapter->state))
 			mod_timer(&adapter->watchdog_timer, jiffies);
 	}
-	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
-		/* Disable interrupts and register for poll. The flush of the
-		 * posted write is intentionally left out. */
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
-		__netif_rx_schedule(netdev, &adapter->napi);
+
+
+	if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) {
+		adapter->tx_ring[0].total_packets = 0;
+		adapter->tx_ring[0].total_bytes = 0;
+		adapter->rx_ring[0].total_packets = 0;
+		adapter->rx_ring[0].total_bytes = 0;
+		/* would disable interrupts here but EIAM disabled it */
+		__netif_rx_schedule(netdev, &adapter->q_vector[0].napi);
 	}
 
 	return IRQ_HANDLED;
 }
 
+static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
+{
+	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+	for (i = 0; i < q_vectors; i++) {
+		struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
+		bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
+		bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
+		q_vector->rxr_count = 0;
+		q_vector->txr_count = 0;
+	}
+}
+
 /**
  * ixgbe_request_irq - initialize interrupts
  * @adapter: board private structure
@@ -799,40 +1248,24 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
  * Attempts to configure interrupts using the best available
  * capabilities of the hardware and kernel.
  **/
-static int ixgbe_request_irq(struct ixgbe_adapter *adapter, u32 *num_rx_queues)
+static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
-	int flags, err;
-	irq_handler_t handler = ixgbe_intr;
-
-	flags = IRQF_SHARED;
-
-	err = ixgbe_setup_msix(adapter);
-	if (!err)
-		goto request_done;
-
-	/*
-	 * if we can't do MSI-X, fall through and try MSI
-	 * No need to reallocate memory since we're decreasing the number of
-	 * queues. We just won't use the other ones, also it is freed correctly
-	 * on ixgbe_remove.
-	 */
-	*num_rx_queues = 1;
+	int err;
 
-	/* do MSI */
-	err = pci_enable_msi(adapter->pdev);
-	if (!err) {
-		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
-		flags &= ~IRQF_SHARED;
-		handler = &ixgbe_intr;
+	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+		err = ixgbe_request_msix_irqs(adapter);
+	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
+		err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
+				  netdev->name, netdev);
+	} else {
+		err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
+				  netdev->name, netdev);
 	}
 
-	err = request_irq(adapter->pdev->irq, handler, flags,
-			  netdev->name, netdev);
 	if (err)
 		DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);
 
-request_done:
 	return err;
 }
 
@@ -841,28 +1274,22 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
 	struct net_device *netdev = adapter->netdev;
 
 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-		int i;
+		int i, q_vectors;
 
-		for (i = 0; i < adapter->num_tx_queues; i++)
-			free_irq(adapter->msix_entries[i].vector,
-				 &(adapter->tx_ring[i]));
-		for (i = 0; i < adapter->num_rx_queues; i++)
-			free_irq(adapter->msix_entries[i +
-						adapter->num_tx_queues].vector,
-				&(adapter->rx_ring[i]));
-		i = adapter->num_rx_queues + adapter->num_tx_queues;
+		q_vectors = adapter->num_msix_vectors;
+
+		i = q_vectors - 1;
 		free_irq(adapter->msix_entries[i].vector, netdev);
-		pci_disable_msix(adapter->pdev);
-		kfree(adapter->msix_entries);
-		adapter->msix_entries = NULL;
-		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
-		return;
-	}
 
-	free_irq(adapter->pdev->irq, netdev);
-	if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
-		pci_disable_msi(adapter->pdev);
-		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
+		i--;
+		for (; i >= 0; i--) {
+			free_irq(adapter->msix_entries[i].vector,
+				 &(adapter->q_vector[i]));
+		}
+
+		ixgbe_reset_q_vectors(adapter);
+	} else {
+		free_irq(adapter->pdev->irq, netdev);
 	}
 }
 
@@ -874,7 +1301,13 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
 {
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
 	IXGBE_WRITE_FLUSH(&adapter->hw);
-	synchronize_irq(adapter->pdev->irq);
+	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+		int i;
+		for (i = 0; i < adapter->num_msix_vectors; i++)
+			synchronize_irq(adapter->msix_entries[i].vector);
+	} else {
+		synchronize_irq(adapter->pdev->irq);
+	}
 }
 
 /**
@@ -883,12 +1316,9 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
  **/
 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
 {
-	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC,
-				(IXGBE_EIMS_ENABLE_MASK &
-				 ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC)));
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS,
-			IXGBE_EIMS_ENABLE_MASK);
+	u32 mask;
+	mask = IXGBE_EIMS_ENABLE_MASK;
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
 	IXGBE_WRITE_FLUSH(&adapter->hw);
 }
 
@@ -898,20 +1328,18 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
  **/
 static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
 {
-	int i;
 	struct ixgbe_hw *hw = &adapter->hw;
 
-	if (adapter->rx_eitr)
-		IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
-				EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr));
-
-	/* for re-triggering the interrupt in non-NAPI mode */
-	adapter->rx_ring[0].eims_value = (1 << IXGBE_MSIX_VECTOR(0));
-	adapter->tx_ring[0].eims_value = (1 << IXGBE_MSIX_VECTOR(0));
+	IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
+			EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr));
 
 	ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0);
-	for (i = 0; i < adapter->num_tx_queues; i++)
-		ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(i), i);
+	ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0);
+
+	map_vector_to_rxq(adapter, 0, 0);
+	map_vector_to_txq(adapter, 0, 0);
+
+	DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n");
 }
 
 /**
@@ -924,23 +1352,29 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
 {
 	u64 tdba;
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 i, tdlen;
+	u32 i, j, tdlen, txctrl;
 
 	/* Setup the HW Tx Head and Tail descriptor pointers */
 	for (i = 0; i < adapter->num_tx_queues; i++) {
+		j = adapter->tx_ring[i].reg_idx;
 		tdba = adapter->tx_ring[i].dma;
 		tdlen = adapter->tx_ring[i].count *
-		    sizeof(union ixgbe_adv_tx_desc);
-		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i), (tdba & DMA_32BIT_MASK));
-		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
-		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i), tdlen);
-		IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
-		adapter->tx_ring[i].head = IXGBE_TDH(i);
-		adapter->tx_ring[i].tail = IXGBE_TDT(i);
+			sizeof(union ixgbe_adv_tx_desc);
+		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
+				(tdba & DMA_32BIT_MASK));
+		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
+		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
+		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
+		adapter->tx_ring[i].head = IXGBE_TDH(j);
+		adapter->tx_ring[i].tail = IXGBE_TDT(j);
+		/* Disable Tx Head Writeback RO bit, since this hoses
+		 * bookkeeping if things aren't delivered in order.
+		 */
+		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
+		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
 	}
-
-	IXGBE_WRITE_REG(hw, IXGBE_TIPG, IXGBE_TIPG_FIBER_DEFAULT);
 }
 
 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
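To make the descriptor-ring programming in ixgbe_configure_tx() above concrete, here is a minimal standalone sketch of the TDBAL/TDBAH address split and the TDLEN length arithmetic. The ring address, descriptor count and the 16-byte advanced-descriptor size are example values chosen for illustration, not read from a running adapter:

#include <stdio.h>

int main(void)
{
	unsigned long long tdba = 0x0000000123456000ULL; /* hypothetical ring DMA address */
	unsigned int count = 256;	/* hypothetical descriptors per ring */
	unsigned int desc_size = 16;	/* advanced Tx descriptor is 16 bytes */

	unsigned int tdbal = (unsigned int)(tdba & 0xffffffffULL); /* low 32 bits -> TDBAL */
	unsigned int tdbah = (unsigned int)(tdba >> 32);           /* high 32 bits -> TDBAH */
	unsigned int tdlen = count * desc_size;                    /* ring bytes -> TDLEN */

	printf("TDBAL=0x%08x TDBAH=0x%08x TDLEN=%u\n", tdbal, tdbah, tdlen);
	return 0;
}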
@@ -959,13 +1393,12 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct net_device *netdev = adapter->netdev;
 	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+	int i, j;
 	u32 rdlen, rxctrl, rxcsum;
 	u32 random[10];
-	u32 reta, mrqc;
-	int i;
 	u32 fctrl, hlreg0;
-	u32 srrctl;
 	u32 pages;
+	u32 reta = 0, mrqc, srrctl;
 
 	/* Decide whether to use packet split mode or not */
 	if (netdev->mtu > ETH_DATA_LEN)
@@ -985,6 +1418,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 
 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
 	fctrl |= IXGBE_FCTRL_BAM;
+	fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
 
 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
@@ -1036,37 +1470,23 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 		adapter->rx_ring[i].tail = IXGBE_RDT(i);
 	}
 
-	if (adapter->num_rx_queues > 1) {
-		/* Random 40bytes used as random key in RSS hash function */
-		get_random_bytes(&random[0], 40);
-
-		switch (adapter->num_rx_queues) {
-		case 8:
-		case 4:
-			/* Bits [3:0] in each byte refers the Rx queue no */
-			reta = 0x00010203;
-			break;
-		case 2:
-			reta = 0x00010001;
-			break;
-		default:
-			reta = 0x00000000;
-			break;
-		}
-
+	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
 		/* Fill out redirection table */
-		for (i = 0; i < 32; i++) {
-			IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RETA(0), i, reta);
-			if (adapter->num_rx_queues > 4) {
-				i++;
-				IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RETA(0), i,
-						      0x04050607);
-			}
+		for (i = 0, j = 0; i < 128; i++, j++) {
+			if (j == adapter->ring_feature[RING_F_RSS].indices)
+				j = 0;
+			/* reta = 4-byte sliding window of
+			 * 0x00..(indices-1)(indices-1)00..etc. */
+			reta = (reta << 8) | (j * 0x11);
+			if ((i & 3) == 3)
+				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
 		}
 
 		/* Fill out hash function seeds */
+		/* XXX use a random constant here to glue certain flows */
+		get_random_bytes(&random[0], 40);
 		for (i = 0; i < 10; i++)
-			IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, random[i]);
+			IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]);
 
 		mrqc = IXGBE_MRQC_RSSEN
 		    /* Perform hash on these packet types */
@@ -1080,26 +1500,23 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 		    | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
 		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
 		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+	}
+
+	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
 
-		/* Multiqueue and packet checksumming are mutually exclusive. */
-		rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
+	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
+	    adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
+		/* Disable indicating checksum in descriptor, enables
+		 * RSS hash */
 		rxcsum |= IXGBE_RXCSUM_PCSD;
-		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
-	} else {
-		/* Enable Receive Checksum Offload for TCP and UDP */
-		rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
-		if (adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
-			/* Enable IPv4 payload checksum for UDP fragments
-			 * Must be used in conjunction with packet-split. */
-			rxcsum |= IXGBE_RXCSUM_IPPCSE;
-		} else {
-			/* don't need to clear IPPCSE as it defaults to 0 */
-		}
-		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
 	}
-	/* Enable Receives */
-	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
-	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+	if (!(rxcsum & IXGBE_RXCSUM_PCSD)) {
+		/* Enable IPv4 payload checksum for UDP fragments
+		 * if PCSD is not set */
+		rxcsum |= IXGBE_RXCSUM_IPPCSE;
+	}
+
+	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
 }
 
 static void ixgbe_vlan_rx_register(struct net_device *netdev,
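The redirection-table loop in ixgbe_configure_rx() above builds each 32-bit RETA register one byte at a time, cycling the byte value through the active RSS queues (each byte is j * 0x11, so both nibbles carry the queue number, and every fourth byte a register value is written). A standalone sketch of that computation, assuming a hypothetical four RSS indices:

#include <stdio.h>

int main(void)
{
	unsigned int indices = 4;	/* hypothetical RSS queue count */
	unsigned int reta = 0;
	int i, j;

	for (i = 0, j = 0; i < 128; i++, j++) {
		if (j == (int)indices)
			j = 0;
		/* same sliding-window construction as the driver loop above */
		reta = (reta << 8) | (unsigned int)(j * 0x11);
		if ((i & 3) == 3)
			printf("RETA[%2d] = 0x%08x\n", i >> 2, reta);
	}
	return 0;
}

With four queues every register prints as 0x00112233, i.e. queue numbers 0-3 repeated across the 128 table entries.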
@@ -1219,6 +1636,42 @@ static void ixgbe_set_multi(struct net_device *netdev)
 
 }
 
+static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
+{
+	int q_idx;
+	struct ixgbe_q_vector *q_vector;
+	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+	/* legacy and MSI only use one vector */
+	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+		q_vectors = 1;
+
+	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+		q_vector = &adapter->q_vector[q_idx];
+		if (!q_vector->rxr_count)
+			continue;
+		napi_enable(&q_vector->napi);
+	}
+}
+
+static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
+{
+	int q_idx;
+	struct ixgbe_q_vector *q_vector;
+	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+	/* legacy and MSI only use one vector */
+	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+		q_vectors = 1;
+
+	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+		q_vector = &adapter->q_vector[q_idx];
+		if (!q_vector->rxr_count)
+			continue;
+		napi_disable(&q_vector->napi);
+	}
+}
+
 static void ixgbe_configure(struct ixgbe_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
@@ -1238,30 +1691,35 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
 static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
-	int i;
-	u32 gpie = 0;
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 txdctl, rxdctl, mhadd;
+	int i, j = 0;
 	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+	u32 txdctl, rxdctl, mhadd;
+	u32 gpie;
 
 	ixgbe_get_hw_control(adapter);
 
-	if (adapter->flags & (IXGBE_FLAG_MSIX_ENABLED |
-			      IXGBE_FLAG_MSI_ENABLED)) {
+	if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
+	    (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
 		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
 			gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
 				IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
 		} else {
 			/* MSI only */
-			gpie = (IXGBE_GPIE_EIAME |
-				IXGBE_GPIE_PBA_SUPPORT);
+			gpie = 0;
 		}
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_GPIE, gpie);
-		gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
+		/* XXX: to interrupt immediately for EICS writes, enable this */
+		/* gpie |= IXGBE_GPIE_EIMEN; */
+		IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
 	}
 
-	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
+	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+		/* legacy interrupts, use EIAM to auto-mask when reading EICR,
+		 * specifically only auto mask tx and rx interrupts */
+		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+	}
 
+	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
 	if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
 		mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
@@ -1270,15 +1728,21 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 	}
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		txdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(i));
+		j = adapter->tx_ring[i].reg_idx;
+		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
 		txdctl |= IXGBE_TXDCTL_ENABLE;
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(i), txdctl);
+		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
 	}
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		rxdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(i));
+		j = adapter->rx_ring[i].reg_idx;
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
+		/* enable PTHRESH=32 descriptors (half the internal cache)
+		 * and HTHRESH=0 descriptors (to minimize latency on fetch),
+		 * this also removes a pesky rx_no_buffer_count increment */
+		rxdctl |= 0x0020;
 		rxdctl |= IXGBE_RXDCTL_ENABLE;
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(i), rxdctl);
+		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
 	}
 	/* enable all receives */
 	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
@@ -1291,7 +1755,11 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 		ixgbe_configure_msi_and_legacy(adapter);
 
 	clear_bit(__IXGBE_DOWN, &adapter->state);
-	napi_enable(&adapter->napi);
+	ixgbe_napi_enable_all(adapter);
+
+	/* clear any pending interrupts, may auto mask */
+	IXGBE_READ_REG(hw, IXGBE_EICR);
+
 	ixgbe_irq_enable(adapter);
 
 	/* bring the link up in the watchdog, this could race with our first
@@ -1333,7 +1801,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	u32 err, num_rx_queues = adapter->num_rx_queues;
+	u32 err;
 
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
@@ -1349,7 +1817,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
 	pci_enable_wake(pdev, PCI_D3cold, 0);
 
 	if (netif_running(netdev)) {
-		err = ixgbe_request_irq(adapter, &num_rx_queues);
+		err = ixgbe_request_irq(adapter);
 		if (err)
 			return err;
 	}
@@ -1449,27 +1917,27 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
 }
 
 /**
- * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
+ * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
  * @adapter: board private structure
  **/
-static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
+static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
 {
 	int i;
 
-	for (i = 0; i < adapter->num_tx_queues; i++)
-		ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
 }
 
 /**
- * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
+ * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
  * @adapter: board private structure
  **/
-static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
+static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
 {
 	int i;
 
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
 }
 
 void ixgbe_down(struct ixgbe_adapter *adapter)
@@ -1493,10 +1961,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 	IXGBE_WRITE_FLUSH(&adapter->hw);
 	msleep(10);
 
-	napi_disable(&adapter->napi);
-
 	ixgbe_irq_disable(adapter);
 
+	ixgbe_napi_disable_all(adapter);
 	del_timer_sync(&adapter->watchdog_timer);
 
 	netif_carrier_off(netdev);
@@ -1547,27 +2014,37 @@ static void ixgbe_shutdown(struct pci_dev *pdev)
 }
 
 /**
- * ixgbe_clean - NAPI Rx polling callback
- * @adapter: board private structure
+ * ixgbe_poll - NAPI Rx polling callback
+ * @napi: structure for representing this polling device
+ * @budget: how many packets driver is allowed to clean
+ *
+ * This function is used for legacy and MSI interrupts in NAPI mode
  **/
-static int ixgbe_clean(struct napi_struct *napi, int budget)
+static int ixgbe_poll(struct napi_struct *napi, int budget)
 {
-	struct ixgbe_adapter *adapter = container_of(napi,
-					struct ixgbe_adapter, napi);
-	struct net_device *netdev = adapter->netdev;
+	struct ixgbe_q_vector *q_vector = container_of(napi,
+					  struct ixgbe_q_vector, napi);
+	struct ixgbe_adapter *adapter = q_vector->adapter;
 	int tx_cleaned = 0, work_done = 0;
 
-	/* In non-MSIX case, there is no multi-Tx/Rx queue */
+#ifdef CONFIG_DCA
+	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
+		ixgbe_update_tx_dca(adapter, adapter->tx_ring);
+		ixgbe_update_rx_dca(adapter, adapter->rx_ring);
+	}
+#endif
+
 	tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
-	ixgbe_clean_rx_irq(adapter, &adapter->rx_ring[0], &work_done,
-			   budget);
+	ixgbe_clean_rx_irq(adapter, adapter->rx_ring, &work_done, budget);
 
 	if (tx_cleaned)
 		work_done = budget;
 
 	/* If budget not fully consumed, exit the polling mode */
 	if (work_done < budget) {
-		netif_rx_complete(netdev, napi);
+		netif_rx_complete(adapter->netdev, napi);
+		if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
+			ixgbe_set_itr(adapter);
 		if (!test_bit(__IXGBE_DOWN, &adapter->state))
 			ixgbe_irq_enable(adapter);
 	}
@@ -1597,6 +2074,136 @@ static void ixgbe_reset_task(struct work_struct *work)
 	ixgbe_reinit_locked(adapter);
 }
 
+static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
+				       int vectors)
+{
+	int err, vector_threshold;
+
+	/* We'll want at least 3 (vector_threshold):
+	 * 1) TxQ[0] Cleanup
+	 * 2) RxQ[0] Cleanup
+	 * 3) Other (Link Status Change, etc.)
+	 * 4) TCP Timer (optional)
+	 */
+	vector_threshold = MIN_MSIX_COUNT;
+
+	/* The more we get, the more we will assign to Tx/Rx Cleanup
+	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
+	 * Right now, we simply care about how many we'll get; we'll
+	 * set them up later while requesting irq's.
+	 */
+	while (vectors >= vector_threshold) {
+		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
+				      vectors);
+		if (!err) /* Success in acquiring all requested vectors. */
+			break;
+		else if (err < 0)
+			vectors = 0; /* Nasty failure, quit now */
+		else /* err == number of vectors we should try again with */
+			vectors = err;
+	}
+
+	if (vectors < vector_threshold) {
+		/* Can't allocate enough MSI-X interrupts?  Oh well.
+		 * This just means we'll go with either a single MSI
+		 * vector or fall back to legacy interrupts.
+		 */
+		DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
+		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
+		kfree(adapter->msix_entries);
+		adapter->msix_entries = NULL;
+		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+		adapter->num_tx_queues = 1;
+		adapter->num_rx_queues = 1;
+	} else {
+		adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
+		adapter->num_msix_vectors = vectors;
+	}
+}
+
+static void __devinit ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
+{
+	int nrq, ntq;
+	int feature_mask = 0, rss_i, rss_m;
+
+	/* Number of supported queues */
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_82598EB:
+		rss_i = adapter->ring_feature[RING_F_RSS].indices;
+		rss_m = 0;
+		feature_mask |= IXGBE_FLAG_RSS_ENABLED;
+
+		switch (adapter->flags & feature_mask) {
+		case (IXGBE_FLAG_RSS_ENABLED):
+			rss_m = 0xF;
+			nrq = rss_i;
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+			ntq = rss_i;
+#else
+			ntq = 1;
+#endif
+			break;
+		case 0:
+		default:
+			rss_i = 0;
+			rss_m = 0;
+			nrq = 1;
+			ntq = 1;
+			break;
+		}
+
+		adapter->ring_feature[RING_F_RSS].indices = rss_i;
+		adapter->ring_feature[RING_F_RSS].mask = rss_m;
+		break;
+	default:
+		nrq = 1;
+		ntq = 1;
+		break;
+	}
+
+	adapter->num_rx_queues = nrq;
+	adapter->num_tx_queues = ntq;
+}
+
+/**
+ * ixgbe_cache_ring_register - Descriptor ring to register mapping
+ * @adapter: board private structure to initialize
+ *
+ * Once we know the feature-set enabled for the device, we'll cache
+ * the register offset the descriptor ring is assigned to.
+ **/
+static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
+{
+	/* TODO: Remove all uses of the indices in the cases where multiple
+	 *       features are OR'd together, if the feature set makes sense.
+	 */
+	int feature_mask = 0, rss_i;
+	int i, txr_idx, rxr_idx;
+
+	/* Number of supported queues */
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_82598EB:
+		rss_i = adapter->ring_feature[RING_F_RSS].indices;
+		txr_idx = 0;
+		rxr_idx = 0;
+		feature_mask |= IXGBE_FLAG_RSS_ENABLED;
+		switch (adapter->flags & feature_mask) {
+		case (IXGBE_FLAG_RSS_ENABLED):
+			for (i = 0; i < adapter->num_rx_queues; i++)
+				adapter->rx_ring[i].reg_idx = i;
+			for (i = 0; i < adapter->num_tx_queues; i++)
+				adapter->tx_ring[i].reg_idx = i;
+			break;
+		case 0:
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+}
+
 /**
  * ixgbe_alloc_queues - Allocate memory for all rings
  * @adapter: board private structure to initialize
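The comments in ixgbe_acquire_msix_vectors() above rely on the pci_enable_msix() contract: zero means every requested vector was granted, a positive return is the number of vectors the platform could actually provide (so the caller may retry with fewer), and a negative value is a hard failure. A standalone sketch of that shrink-and-retry loop, using a hypothetical stand-in for pci_enable_msix():

#include <stdio.h>

/* Hypothetical stand-in for pci_enable_msix(): pretend the platform can
 * grant at most 4 vectors. */
static int fake_pci_enable_msix(int requested)
{
	const int available = 4;

	return (requested <= available) ? 0 : available;
}

int main(void)
{
	int vectors = 10;		/* example initial request */
	int vector_threshold = 3;	/* Tx + Rx + "other", as in the comment above */
	int err;

	while (vectors >= vector_threshold) {
		err = fake_pci_enable_msix(vectors);
		if (!err)		/* success: all requested vectors granted */
			break;
		else if (err < 0)	/* hard failure: give up on MSI-X */
			vectors = 0;
		else			/* retry with what the platform offers */
			vectors = err;
	}

	if (vectors < vector_threshold)
		printf("falling back to MSI or legacy interrupts\n");
	else
		printf("MSI-X enabled with %d vectors\n", vectors);
	return 0;
}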
@@ -1612,25 +2219,167 @@ static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
 	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
 				   sizeof(struct ixgbe_ring), GFP_KERNEL);
 	if (!adapter->tx_ring)
-		return -ENOMEM;
-
-	for (i = 0; i < adapter->num_tx_queues; i++)
-		adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD;
+		goto err_tx_ring_allocation;
 
 	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
 				   sizeof(struct ixgbe_ring), GFP_KERNEL);
-	if (!adapter->rx_ring) {
-		kfree(adapter->tx_ring);
-		return -ENOMEM;
-	}
+	if (!adapter->rx_ring)
+		goto err_rx_ring_allocation;
 
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD;
+		adapter->tx_ring[i].queue_index = i;
+	}
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		adapter->rx_ring[i].adapter = adapter;
-		adapter->rx_ring[i].itr_register = IXGBE_EITR(i);
 		adapter->rx_ring[i].count = IXGBE_DEFAULT_RXD;
+		adapter->rx_ring[i].queue_index = i;
+	}
+
+	ixgbe_cache_ring_register(adapter);
+
+	return 0;
+
+err_rx_ring_allocation:
+	kfree(adapter->tx_ring);
+err_tx_ring_allocation:
+	return -ENOMEM;
+}
+
+/**
+ * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
+						    *adapter)
+{
+	int err = 0;
+	int vector, v_budget;
+
+	/*
+	 * It's easy to be greedy for MSI-X vectors, but it really
+	 * doesn't do us much good if we have a lot more vectors
+	 * than CPU's.  So let's be conservative and only ask for
+	 * (roughly) twice the number of vectors as there are CPU's.
+	 */
+	v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
+		       (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
+
+	/*
+	 * At the same time, hardware can only support a maximum of
+	 * MAX_MSIX_COUNT vectors.  With features such as RSS and VMDq,
+	 * we can easily reach upwards of 64 Rx descriptor queues and
+	 * 32 Tx queues.  Thus, we cap it off in those rare cases where
+	 * the cpu count also exceeds our vector limit.
+	 */
+	v_budget = min(v_budget, MAX_MSIX_COUNT);
+
+	/* A failure in MSI-X entry allocation isn't fatal, but it does
+	 * mean we disable MSI-X capabilities of the adapter. */
+	adapter->msix_entries = kcalloc(v_budget,
+					sizeof(struct msix_entry), GFP_KERNEL);
+	if (!adapter->msix_entries) {
+		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+		ixgbe_set_num_queues(adapter);
+		kfree(adapter->tx_ring);
+		kfree(adapter->rx_ring);
+		err = ixgbe_alloc_queues(adapter);
+		if (err) {
+			DPRINTK(PROBE, ERR, "Unable to allocate memory "
+					    "for queues\n");
+			goto out;
+		}
+
+		goto try_msi;
+	}
+
+	for (vector = 0; vector < v_budget; vector++)
+		adapter->msix_entries[vector].entry = vector;
+
+	ixgbe_acquire_msix_vectors(adapter, v_budget);
+
+	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+		goto out;
+
+try_msi:
+	err = pci_enable_msi(adapter->pdev);
+	if (!err) {
+		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
+	} else {
+		DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
+				   "falling back to legacy.  Error: %d\n", err);
+		/* reset err */
+		err = 0;
+	}
+
+out:
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	/* Notify the stack of the (possibly) reduced Tx Queue count. */
+	adapter->netdev->egress_subqueue_count = adapter->num_tx_queues;
+#endif
+
+	return err;
+}
+
+static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
+{
+	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
+		pci_disable_msix(adapter->pdev);
+		kfree(adapter->msix_entries);
+		adapter->msix_entries = NULL;
+	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
+		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
+		pci_disable_msi(adapter->pdev);
 	}
+	return;
+}
+
+/**
+ * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
+ * @adapter: board private structure to initialize
+ *
+ * We determine which interrupt scheme to use based on...
+ * - Kernel support (MSI, MSI-X)
+ *   - which can be user-defined (via MODULE_PARAM)
+ * - Hardware queue count (num_*_queues)
+ *   - defined by miscellaneous hardware support/features (RSS, etc.)
+ **/
+static int __devinit ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
+{
+	int err;
+
+	/* Number of supported queues */
+	ixgbe_set_num_queues(adapter);
+
+	err = ixgbe_alloc_queues(adapter);
+	if (err) {
+		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
+		goto err_alloc_queues;
+	}
+
+	err = ixgbe_set_interrupt_capability(adapter);
+	if (err) {
+		DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
+		goto err_set_interrupt;
+	}
+
+	DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
+			   "Tx Queue count = %u\n",
+		(adapter->num_rx_queues > 1) ? "Enabled" :
+		"Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
+
+	set_bit(__IXGBE_DOWN, &adapter->state);
 
 	return 0;
+
+err_set_interrupt:
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+err_alloc_queues:
+	return err;
 }
 
 /**
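To make the vector-budget arithmetic in ixgbe_set_interrupt_capability() above concrete: the request is the smaller of the total queue count and twice the online CPU count, plus the non-queue vectors, capped by the hardware maximum. A standalone sketch with made-up values for the queue counts, CPU count, NON_Q_VECTORS and MAX_MSIX_COUNT:

#include <stdio.h>

static int min_int(int a, int b)
{
	return (a < b) ? a : b;
}

int main(void)
{
	int num_rx_queues = 16, num_tx_queues = 16;	/* example queue counts */
	int online_cpus = 4;				/* example CPU count */
	int non_q_vectors = 1;				/* e.g. one link-status/"other" vector */
	int max_msix_count = 18;			/* example hardware ceiling */
	int v_budget;

	v_budget = min_int(num_rx_queues + num_tx_queues, online_cpus * 2) +
		   non_q_vectors;
	v_budget = min_int(v_budget, max_msix_count);

	printf("request %d MSI-X vectors\n", v_budget);	/* min(32, 8) + 1 = 9 */
	return 0;
}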
@@ -1645,11 +2394,22 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct pci_dev *pdev = adapter->pdev;
+	unsigned int rss;
+
+	/* Set capability flags */
+	rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
+	adapter->ring_feature[RING_F_RSS].indices = rss;
+	adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
+
+	/* Enable Dynamic interrupt throttling by default */
+	adapter->rx_eitr = 1;
+	adapter->tx_eitr = 1;
 
 	/* default flow control settings */
 	hw->fc.original_type = ixgbe_fc_full;
 	hw->fc.type = ixgbe_fc_full;
 
+	/* select 10G link by default */
 	hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
 	if (hw->mac.ops.reset(hw)) {
 		dev_err(&pdev->dev, "HW Init failed\n");
@@ -1667,16 +2427,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 		return -EIO;
 	}
 
-	/* Set the default values */
-	adapter->num_rx_queues = IXGBE_DEFAULT_RXQ;
-	adapter->num_tx_queues = 1;
+	/* enable rx csum by default */
 	adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
 
-	if (ixgbe_alloc_queues(adapter)) {
-		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
-		return -ENOMEM;
-	}
-
 	set_bit(__IXGBE_DOWN, &adapter->state);
 
 	return 0;
@@ -1716,7 +2469,6 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
 		return -ENOMEM;
 	}
 
-	txdr->adapter = adapter;
 	txdr->next_to_use = 0;
 	txdr->next_to_clean = 0;
 	txdr->work_limit = txdr->count;
@@ -1735,7 +2487,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
 			     struct ixgbe_ring *rxdr)
 {
 	struct pci_dev *pdev = adapter->pdev;
-	int size, desc_len;
+	int size;
 
 	size = sizeof(struct ixgbe_rx_buffer) * rxdr->count;
 	rxdr->rx_buffer_info = vmalloc(size);
@@ -1746,10 +2498,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
 	}
 	memset(rxdr->rx_buffer_info, 0, size);
 
-	desc_len = sizeof(union ixgbe_adv_rx_desc);
-
 	/* Round up to nearest 4K */
-	rxdr->size = rxdr->count * desc_len;
+	rxdr->size = rxdr->count * sizeof(union ixgbe_adv_rx_desc);
 	rxdr->size = ALIGN(rxdr->size, 4096);
 
 	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
@@ -1763,7 +2513,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
 
 	rxdr->next_to_clean = 0;
 	rxdr->next_to_use = 0;
-	rxdr->adapter = adapter;
 
 	return 0;
 }
@@ -1841,8 +2590,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
 }
 
 /**
- * ixgbe_setup_all_tx_resources - wrapper to allocate Tx resources
- *				  (Descriptors) for all queues
+ * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
  * @adapter: board private structure
  *
  * If this function returns with an error, then it's possible one or
@@ -1868,8 +2616,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
 }
 
 /**
- * ixgbe_setup_all_rx_resources - wrapper to allocate Rx resources
- *				  (Descriptors) for all queues
+ * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
  * @adapter: board private structure
  *
  * If this function returns with an error, then it's possible one or
@@ -1911,6 +2658,9 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
 	    (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
 		return -EINVAL;
 
+	DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
+		netdev->mtu, new_mtu);
+	/* must set new MTU before calling down or up */
 	netdev->mtu = new_mtu;
 
 	if (netif_running(netdev))
@@ -1935,23 +2685,16 @@ static int ixgbe_open(struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	int err;
-	u32 num_rx_queues = adapter->num_rx_queues;
 
 	/* disallow open during test */
 	if (test_bit(__IXGBE_TESTING, &adapter->state))
 		return -EBUSY;
 
-try_intr_reinit:
 	/* allocate transmit descriptors */
 	err = ixgbe_setup_all_tx_resources(adapter);
 	if (err)
 		goto err_setup_tx;
 
-	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
-		num_rx_queues = 1;
-		adapter->num_rx_queues = num_rx_queues;
-	}
-
 	/* allocate receive descriptors */
 	err = ixgbe_setup_all_rx_resources(adapter);
 	if (err)
@@ -1959,31 +2702,10 @@ try_intr_reinit:
 
 	ixgbe_configure(adapter);
 
-	err = ixgbe_request_irq(adapter, &num_rx_queues);
+	err = ixgbe_request_irq(adapter);
 	if (err)
 		goto err_req_irq;
 
-	/* ixgbe_request might have reduced num_rx_queues */
-	if (num_rx_queues < adapter->num_rx_queues) {
-		/* We didn't get MSI-X, so we need to release everything,
-		 * set our Rx queue count to num_rx_queues, and redo the
-		 * whole init process.
-		 */
-		ixgbe_free_irq(adapter);
-		if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
-			pci_disable_msi(adapter->pdev);
-			adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
-		}
-		ixgbe_free_all_rx_resources(adapter);
-		ixgbe_free_all_tx_resources(adapter);
-		adapter->num_rx_queues = num_rx_queues;
-
-		/* Reset the hardware, and start over. */
-		ixgbe_reset(adapter);
-
-		goto try_intr_reinit;
-	}
-
 	err = ixgbe_up_complete(adapter);
 	if (err)
 		goto err_up;
@@ -2119,6 +2841,9 @@ static void ixgbe_watchdog(unsigned long data)
 	struct net_device *netdev = adapter->netdev;
 	bool link_up;
 	u32 link_speed = 0;
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	int i;
+#endif
 
 	adapter->hw.mac.ops.check_link(&adapter->hw, &(link_speed), &link_up);
 
@@ -2140,6 +2865,10 @@ static void ixgbe_watchdog(unsigned long data)
 
 			netif_carrier_on(netdev);
 			netif_wake_queue(netdev);
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+			for (i = 0; i < adapter->num_tx_queues; i++)
+				netif_wake_subqueue(netdev, i);
+#endif
 		} else {
 			/* Force detection of hung controller */
 			adapter->detect_tx_hung = true;
@@ -2154,10 +2883,23 @@ static void ixgbe_watchdog(unsigned long data)
 
 	ixgbe_update_stats(adapter);
 
-	/* Reset the timer */
-	if (!test_bit(__IXGBE_DOWN, &adapter->state))
+	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+		/* Cause software interrupt to ensure rx rings are cleaned */
+		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+			u32 eics =
+			 (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1;
+			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, eics);
+		} else {
+			/* for legacy and MSI interrupts don't set any bits that
+			 * are enabled for EIAM, because this operation would
+			 * set *both* EIMS and EICS for any bit in EIAM */
+			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
+				     (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
+		}
+		/* Reset the timer */
 		mod_timer(&adapter->watchdog_timer,
 			  round_jiffies(jiffies + 2 * HZ));
+	}
 }
 
 static int ixgbe_tso(struct ixgbe_adapter *adapter,
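The watchdog hunk above kicks the queue vectors by writing EICS with one bit set per queue vector. A standalone sketch of that mask, assuming a hypothetical five allocated MSI-X vectors of which one is the non-queue ("other") vector:

#include <stdio.h>

int main(void)
{
	int num_msix_vectors = 5;	/* hypothetical: 4 queue vectors + 1 "other" */
	int non_q_vectors = 1;
	unsigned int eics;

	/* one bit per queue vector; writing this raises a software
	 * interrupt on every Tx/Rx vector */
	eics = (1u << (num_msix_vectors - non_q_vectors)) - 1;

	printf("EICS = 0x%08x\n", eics);	/* 0x0000000f: bits 0-3 set */
	return 0;
}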
@@ -2170,7 +2912,6 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
 	struct ixgbe_tx_buffer *tx_buffer_info;
 	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
 	u32 mss_l4len_idx = 0, l4len;
-	*hdr_len = 0;
 
 	if (skb_is_gso(skb)) {
 		if (skb_header_cloned(skb)) {
@@ -2454,7 +3195,11 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	netif_stop_subqueue(netdev, tx_ring->queue_index);
+#else
 	netif_stop_queue(netdev);
+#endif
 	/* Herbert's original patch had:
 	 *  smp_mb__after_netif_stop_queue();
 	 * but since that doesn't exist yet, just open code it. */
@@ -2466,7 +3211,11 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
 		return -EBUSY;
 
 	/* A reprieve! - use start_queue because it doesn't call schedule */
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	netif_wake_subqueue(netdev, tx_ring->queue_index);
+#else
 	netif_wake_queue(netdev);
+#endif
 	++adapter->restart_queue;
 	return 0;
 }
@@ -2487,15 +3236,18 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	unsigned int len = skb->len;
 	unsigned int first;
 	unsigned int tx_flags = 0;
-	u8 hdr_len;
-	int tso;
+	u8 hdr_len = 0;
+	int r_idx = 0, tso;
 	unsigned int mss = 0;
 	int count = 0;
 	unsigned int f;
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	len -= skb->data_len;
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping;
+#endif
+	tx_ring = &adapter->tx_ring[r_idx];
 
-	tx_ring = adapter->tx_ring;
 
 	if (skb->len <= 0) {
 		dev_kfree_skb(skb);
@@ -2603,6 +3355,31 @@ static void ixgbe_netpoll(struct net_device *netdev)
 }
 #endif
 
+/**
+ * ixgbe_napi_add_all - prep napi structs for use
+ * @adapter: private struct
+ * helper function to napi_add each possible q_vector->napi
+ */
+static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
+{
+	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+	int (*poll)(struct napi_struct *, int);
+
+	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+		poll = &ixgbe_clean_rxonly;
+	} else {
+		poll = &ixgbe_poll;
+		/* only one q_vector for legacy modes */
+		q_vectors = 1;
+	}
+
+	for (i = 0; i < q_vectors; i++) {
+		struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
+		netif_napi_add(adapter->netdev, &q_vector->napi,
+			       (*poll), 64);
+	}
+}
+
 /**
  * ixgbe_probe - Device Initialization Routine
  * @pdev: PCI device information struct
@@ -2655,7 +3432,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 
 	pci_set_master(pdev);
 
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES);
+#else
 	netdev = alloc_etherdev(sizeof(struct ixgbe_adapter));
+#endif
 	if (!netdev) {
 		err = -ENOMEM;
 		goto err_alloc_etherdev;
@@ -2696,7 +3477,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	ixgbe_set_ethtool_ops(netdev);
 	netdev->tx_timeout = &ixgbe_tx_timeout;
 	netdev->watchdog_timeo = 5 * HZ;
-	netif_napi_add(netdev, &adapter->napi, ixgbe_clean, 64);
 	netdev->vlan_rx_register = ixgbe_vlan_rx_register;
 	netdev->vlan_rx_add_vid = ixgbe_vlan_rx_add_vid;
 	netdev->vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid;
@@ -2719,6 +3499,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 
 	/* Setup hw api */
 	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
+	hw->mac.type  = ii->mac;
 
 	err = ii->get_invariants(hw);
 	if (err)
@@ -2741,6 +3522,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	netdev->features |= NETIF_F_MULTI_QUEUE;
+#endif
 
 	/* make sure the EEPROM is good */
 	if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
@@ -2770,9 +3554,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
 	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
 
-	/* Interrupt Throttle Rate */
-	adapter->rx_eitr = (1000000 / IXGBE_DEFAULT_ITR_RX_USECS);
-	adapter->tx_eitr = (1000000 / IXGBE_DEFAULT_ITR_TX_USECS);
+	err = ixgbe_init_interrupt_scheme(adapter);
+	if (err)
+		goto err_sw_init;
 
 	/* print bus type/speed/width info */
 	pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status);
@@ -2808,12 +3592,27 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 
 	netif_carrier_off(netdev);
 	netif_stop_queue(netdev);
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		netif_stop_subqueue(netdev, i);
+#endif
+
+	ixgbe_napi_add_all(adapter);
 
 	strcpy(netdev->name, "eth%d");
 	err = register_netdev(netdev);
 	if (err)
 		goto err_register;
 
+#ifdef CONFIG_DCA
+	if (dca_add_requester(&pdev->dev) == 0) {
+		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
+		/* always use CB2 mode, difference is masked
+		 * in the CB driver */
+		IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
+		ixgbe_setup_dca(adapter);
+	}
+#endif
 
 	dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
 	cards_found++;
@@ -2823,6 +3622,7 @@ err_register:
 	ixgbe_release_hw_control(adapter);
 err_hw_init:
 err_sw_init:
+	ixgbe_reset_interrupt_capability(adapter);
 err_eeprom:
 	iounmap(hw->hw_addr);
 err_ioremap:
@@ -2854,16 +3654,27 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
 
 	flush_scheduled_work();
 
+#ifdef CONFIG_DCA
+	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
+		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
+		dca_remove_requester(&pdev->dev);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
+	}
+
+#endif
 	unregister_netdev(netdev);
 
-	ixgbe_release_hw_control(adapter);
+	ixgbe_reset_interrupt_capability(adapter);
 
-	kfree(adapter->tx_ring);
-	kfree(adapter->rx_ring);
+	ixgbe_release_hw_control(adapter);
 
 	iounmap(adapter->hw.hw_addr);
 	pci_release_regions(pdev);
 
+	DPRINTK(PROBE, INFO, "complete\n");
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+
 	free_netdev(netdev);
 
 	pci_disable_device(pdev);
@@ -2975,6 +3786,10 @@ static int __init ixgbe_init_module(void)
 
 	printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);
 
+#ifdef CONFIG_DCA
+	dca_register_notify(&dca_notifier);
+
+#endif
 	ret = pci_register_driver(&ixgbe_driver);
 	return ret;
 }
@@ -2988,8 +3803,25 @@ module_init(ixgbe_init_module);
  **/
 static void __exit ixgbe_exit_module(void)
 {
+#ifdef CONFIG_DCA
+	dca_unregister_notify(&dca_notifier);
+#endif
 	pci_unregister_driver(&ixgbe_driver);
 }
+
+#ifdef CONFIG_DCA
+static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
+			    void *p)
+{
+	int ret_val;
+
+	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
+					 __ixgbe_notify_dca);
+
+	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
+}
+#endif /* CONFIG_DCA */
+
 module_exit(ixgbe_exit_module);
 
 /* ixgbe_main.c */

+ 1233 - 0
drivers/net/korina.c

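Before the new driver below: the korina hardware takes its station address as a 16-bit high word and a 32-bit low word (the STATION_ADDRESS_HIGH/LOW macros defined near the top of the file). A standalone sketch of that packing with a made-up MAC address:

#include <stdio.h>

int main(void)
{
	unsigned char addr[6] = { 0x00, 0x0c, 0x42, 0x11, 0x22, 0x33 }; /* example MAC */
	unsigned int high, low;

	high = (addr[0] << 8) | addr[1];			/* bytes 0-1 -> ethsah* */
	low  = ((unsigned int)addr[2] << 24) | (addr[3] << 16) |
	       (addr[4] << 8) | addr[5];			/* bytes 2-5 -> ethsal* */

	printf("ETHSAH = 0x%04x  ETHSAL = 0x%08x\n", high, low);
	return 0;
}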
@@ -0,0 +1,1233 @@
+/*
+ *  Driver for the IDT RC32434 (Korina) on-chip ethernet controller.
+ *
+ *  Copyright 2004 IDT Inc. (rischelp@idt.com)
+ *  Copyright 2006 Felix Fietkau <nbd@openwrt.org>
+ *  Copyright 2008 Florian Fainelli <florian@openwrt.org>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
+ *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
+ *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the  GNU General Public License along
+ *  with this program; if not, write  to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *  Writing to a DMA status register:
+ *
+ *  When writing to the status register, you should mask the bit you have
+ *  been testing the status register with. Both Tx and Rx DMA registers
+ *  should stick to this procedure.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/ctype.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+
+#include <asm/bootinfo.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/pgtable.h>
+#include <asm/segment.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <asm/mach-rc32434/rb.h>
+#include <asm/mach-rc32434/rc32434.h>
+#include <asm/mach-rc32434/eth.h>
+#include <asm/mach-rc32434/dma_v.h>
+
+#define DRV_NAME        "korina"
+#define DRV_VERSION     "0.10"
+#define DRV_RELDATE     "04Mar2008"
+
+#define STATION_ADDRESS_HIGH(dev) (((dev)->dev_addr[0] << 8) | \
+				   ((dev)->dev_addr[1]))
+#define STATION_ADDRESS_LOW(dev)  (((dev)->dev_addr[2] << 24) | \
+				   ((dev)->dev_addr[3] << 16) | \
+				   ((dev)->dev_addr[4] << 8)  | \
+				   ((dev)->dev_addr[5]))
+
+#define MII_CLOCK 1250000 	/* no more than 2.5MHz */
+
+/* the following must be powers of two */
+#define KORINA_NUM_RDS	64  /* number of receive descriptors */
+#define KORINA_NUM_TDS	64  /* number of transmit descriptors */
+
+#define KORINA_RBSIZE	536 /* size of one resource buffer = Ether MTU */
+#define KORINA_RDS_MASK	(KORINA_NUM_RDS - 1)
+#define KORINA_TDS_MASK	(KORINA_NUM_TDS - 1)
+#define RD_RING_SIZE 	(KORINA_NUM_RDS * sizeof(struct dma_desc))
+#define TD_RING_SIZE	(KORINA_NUM_TDS * sizeof(struct dma_desc))
+
+#define TX_TIMEOUT 	(6000 * HZ / 1000)
+
+enum chain_status { desc_filled, desc_empty };
+#define IS_DMA_FINISHED(X)   (((X) & (DMA_DESC_FINI)) != 0)
+#define IS_DMA_DONE(X)   (((X) & (DMA_DESC_DONE)) != 0)
+#define RCVPKT_LENGTH(X)     (((X) & ETH_RX_LEN) >> ETH_RX_LEN_BIT)
+
+/* Information that needs to be kept for each board. */
+struct korina_private {
+	struct eth_regs *eth_regs;
+	struct dma_reg *rx_dma_regs;
+	struct dma_reg *tx_dma_regs;
+	struct dma_desc *td_ring; /* transmit descriptor ring */
+	struct dma_desc *rd_ring; /* receive descriptor ring  */
+
+	struct sk_buff *tx_skb[KORINA_NUM_TDS];
+	struct sk_buff *rx_skb[KORINA_NUM_RDS];
+
+	int rx_next_done;
+	int rx_chain_head;
+	int rx_chain_tail;
+	enum chain_status rx_chain_status;
+
+	int tx_next_done;
+	int tx_chain_head;
+	int tx_chain_tail;
+	enum chain_status tx_chain_status;
+	int tx_count;
+	int tx_full;
+
+	int rx_irq;
+	int tx_irq;
+	int ovr_irq;
+	int und_irq;
+
+	spinlock_t lock;        /* NIC xmit lock */
+
+	int dma_halt_cnt;
+	int dma_run_cnt;
+	struct napi_struct napi;
+	struct mii_if_info mii_if;
+	struct net_device *dev;
+	int phy_addr;
+};
+
+extern unsigned int idt_cpu_freq;
+
+static inline void korina_start_dma(struct dma_reg *ch, u32 dma_addr)
+{
+	writel(0, &ch->dmandptr);
+	writel(dma_addr, &ch->dmadptr);
+}
+
+static inline void korina_abort_dma(struct net_device *dev,
+					struct dma_reg *ch)
+{
+	if (readl(&ch->dmac) & DMA_CHAN_RUN_BIT) {
+		writel(0x10, &ch->dmac);
+
+		while (!(readl(&ch->dmas) & DMA_STAT_HALT))
+			dev->trans_start = jiffies;
+
+		writel(0, &ch->dmas);
+	}
+
+	writel(0, &ch->dmadptr);
+	writel(0, &ch->dmandptr);
+}
+
+static inline void korina_chain_dma(struct dma_reg *ch, u32 dma_addr)
+{
+	writel(dma_addr, &ch->dmandptr);
+}
+
+static void korina_abort_tx(struct net_device *dev)
+{
+	struct korina_private *lp = netdev_priv(dev);
+
+	korina_abort_dma(dev, lp->tx_dma_regs);
+}
+
+static void korina_abort_rx(struct net_device *dev)
+{
+	struct korina_private *lp = netdev_priv(dev);
+
+	korina_abort_dma(dev, lp->rx_dma_regs);
+}
+
+static void korina_start_rx(struct korina_private *lp,
+					struct dma_desc *rd)
+{
+	korina_start_dma(lp->rx_dma_regs, CPHYSADDR(rd));
+}
+
+static void korina_chain_rx(struct korina_private *lp,
+					struct dma_desc *rd)
+{
+	korina_chain_dma(lp->rx_dma_regs, CPHYSADDR(rd));
+}
+
+/* transmit packet */
+static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+	struct korina_private *lp = netdev_priv(dev);
+	unsigned long flags;
+	u32 length;
+	u32 chain_index;
+	struct dma_desc *td;
+
+	spin_lock_irqsave(&lp->lock, flags);
+
+	td = &lp->td_ring[lp->tx_chain_tail];
+
+	/* stop queue when full, drop pkts if queue already full */
+	if (lp->tx_count >= (KORINA_NUM_TDS - 2)) {
+		lp->tx_full = 1;
+
+		if (lp->tx_count == (KORINA_NUM_TDS - 2))
+			netif_stop_queue(dev);
+		else {
+			dev->stats.tx_dropped++;
+			dev_kfree_skb_any(skb);
+			spin_unlock_irqrestore(&lp->lock, flags);
+
+			return NETDEV_TX_BUSY;
+		}
+	}
+
+	lp->tx_count++;
+
+	lp->tx_skb[lp->tx_chain_tail] = skb;
+
+	length = skb->len;
+	dma_cache_wback((u32)skb->data, skb->len);
+
+	/* Setup the transmit descriptor. */
+	dma_cache_inv((u32) td, sizeof(*td));
+	td->ca = CPHYSADDR(skb->data);
+	chain_index = (lp->tx_chain_tail - 1) &
+			KORINA_TDS_MASK;
+
+	if (readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
+		if (lp->tx_chain_status == desc_empty) {
+			/* Update tail */
+			td->control = DMA_COUNT(length) |
+					DMA_DESC_COF | DMA_DESC_IOF;
+			/* Move tail */
+			lp->tx_chain_tail = chain_index;
+			/* Write to NDPTR */
+			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
+					&lp->tx_dma_regs->dmandptr);
+			/* Move head to tail */
+			lp->tx_chain_head = lp->tx_chain_tail;
+		} else {
+			/* Update tail */
+			td->control = DMA_COUNT(length) |
+					DMA_DESC_COF | DMA_DESC_IOF;
+			/* Link to prev */
+			lp->td_ring[chain_index].control &=
+					~DMA_DESC_COF;
+			/* Link to prev */
+			lp->td_ring[chain_index].link =  CPHYSADDR(td);
+			/* Move tail */
+			lp->tx_chain_tail = chain_index;
+			/* Write to NDPTR */
+			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
+					&(lp->tx_dma_regs->dmandptr));
+			/* Move head to tail */
+			lp->tx_chain_head = lp->tx_chain_tail;
+			lp->tx_chain_status = desc_empty;
+		}
+	} else {
+		if (lp->tx_chain_status == desc_empty) {
+			/* Update tail */
+			td->control = DMA_COUNT(length) |
+					DMA_DESC_COF | DMA_DESC_IOF;
+			/* Move tail */
+			lp->tx_chain_tail = chain_index;
+			lp->tx_chain_status = desc_filled;
+			netif_stop_queue(dev);
+		} else {
+			/* Update tail */
+			td->control = DMA_COUNT(length) |
+					DMA_DESC_COF | DMA_DESC_IOF;
+			lp->td_ring[chain_index].control &=
+					~DMA_DESC_COF;
+			lp->td_ring[chain_index].link =  CPHYSADDR(td);
+			lp->tx_chain_tail = chain_index;
+		}
+	}
+	dma_cache_wback((u32) td, sizeof(*td));
+
+	dev->trans_start = jiffies;
+	spin_unlock_irqrestore(&lp->lock, flags);
+
+	return NETDEV_TX_OK;
+}
+
+static int mdio_read(struct net_device *dev, int mii_id, int reg)
+{
+	struct korina_private *lp = netdev_priv(dev);
+	int ret;
+
+	mii_id = ((lp->rx_irq == 0x2c ? 1 : 0) << 8);
+
+	writel(0, &lp->eth_regs->miimcfg);
+	writel(0, &lp->eth_regs->miimcmd);
+	writel(mii_id | reg, &lp->eth_regs->miimaddr);
+	writel(ETH_MII_CMD_SCN, &lp->eth_regs->miimcmd);
+
+	ret = (int)(readl(&lp->eth_regs->miimrdd));
+	return ret;
+}
+
+static void mdio_write(struct net_device *dev, int mii_id, int reg, int val)
+{
+	struct korina_private *lp = netdev_priv(dev);
+
+	mii_id = ((lp->rx_irq == 0x2c ? 1 : 0) << 8);
+
+	writel(0, &lp->eth_regs->miimcfg);
+	writel(1, &lp->eth_regs->miimcmd);
+	writel(mii_id | reg, &lp->eth_regs->miimaddr);
+	writel(ETH_MII_CMD_SCN, &lp->eth_regs->miimcmd);
+	writel(val, &lp->eth_regs->miimwtd);
+}
+
+/* Ethernet Rx DMA interrupt */
+static irqreturn_t korina_rx_dma_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct korina_private *lp = netdev_priv(dev);
+	u32 dmas, dmasm;
+	irqreturn_t retval;
+
+	dmas = readl(&lp->rx_dma_regs->dmas);
+	if (dmas & (DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR)) {
+		netif_rx_schedule_prep(dev, &lp->napi);
+
+		dmasm = readl(&lp->rx_dma_regs->dmasm);
+		writel(dmasm | (DMA_STAT_DONE |
+				DMA_STAT_HALT | DMA_STAT_ERR),
+				&lp->rx_dma_regs->dmasm);
+
+		if (dmas & DMA_STAT_ERR)
+			printk(KERN_ERR DRV_NAME ": %s: DMA error\n", dev->name);
+
+		retval = IRQ_HANDLED;
+	} else
+		retval = IRQ_NONE;
+
+	return retval;
+}
+
+static int korina_rx(struct net_device *dev, int limit)
+{
+	struct korina_private *lp = netdev_priv(dev);
+	struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done];
+	struct sk_buff *skb, *skb_new;
+	u8 *pkt_buf;
+	u32 devcs, pkt_len, dmas, rx_free_desc;
+	int count;
+
+	dma_cache_inv((u32)rd, sizeof(*rd));
+
+	for (count = 0; count < limit; count++) {
+
+		devcs = rd->devcs;
+
+		/* Update statistics counters */
+		if (devcs & ETH_RX_CRC)
+			dev->stats.rx_crc_errors++;
+		if (devcs & ETH_RX_LOR)
+			dev->stats.rx_length_errors++;
+		if (devcs & ETH_RX_LE)
+			dev->stats.rx_length_errors++;
+		if (devcs & ETH_RX_OVR)
+			dev->stats.rx_over_errors++;
+		if (devcs & ETH_RX_CV)
+			dev->stats.rx_frame_errors++;
+		if (devcs & ETH_RX_CES)
+			dev->stats.rx_length_errors++;
+		if (devcs & ETH_RX_MP)
+			dev->stats.multicast++;
+
+		if ((devcs & ETH_RX_LD) != ETH_RX_LD) {
+			/* check that this is a whole packet
+			 * WARNING: DMA_FD bit incorrectly set
+			 * in Rc32434 (errata ref #077) */
+			dev->stats.rx_errors++;
+			dev->stats.rx_dropped++;
+		}
+
+		while ((rx_free_desc = KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) != 0) {
+			/* init the var. used for the later
+			 * operations within the while loop */
+			skb_new = NULL;
+			pkt_len = RCVPKT_LENGTH(devcs);
+			skb = lp->rx_skb[lp->rx_next_done];
+
+			if ((devcs & ETH_RX_ROK)) {
+				/* must be the (first and) last
+				 * descriptor then */
+				pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;
+
+				/* invalidate the cache */
+				dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
+
+				/* Malloc up new buffer. */
+				skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2);
+
+				if (!skb_new)
+					break;
+				/* Do not count the CRC */
+				skb_put(skb, pkt_len - 4);
+				skb->protocol = eth_type_trans(skb, dev);
+
+				/* Pass the packet to upper layers */
+				netif_receive_skb(skb);
+				dev->last_rx = jiffies;
+				dev->stats.rx_packets++;
+				dev->stats.rx_bytes += pkt_len;
+
+				/* Update the mcast stats */
+				if (devcs & ETH_RX_MP)
+					dev->stats.multicast++;
+
+				lp->rx_skb[lp->rx_next_done] = skb_new;
+			}
+
+			rd->devcs = 0;
+
+			/* Restore descriptor's curr_addr */
+			if (skb_new)
+				rd->ca = CPHYSADDR(skb_new->data);
+			else
+				rd->ca = CPHYSADDR(skb->data);
+
+			rd->control = DMA_COUNT(KORINA_RBSIZE) |
+				DMA_DESC_COD | DMA_DESC_IOD;
+			lp->rd_ring[(lp->rx_next_done - 1) &
+				KORINA_RDS_MASK].control &=
+				~DMA_DESC_COD;
+
+			lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
+			dma_cache_wback((u32)rd, sizeof(*rd));
+			rd = &lp->rd_ring[lp->rx_next_done];
+			writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
+		}
+	}
+
+	dmas = readl(&lp->rx_dma_regs->dmas);
+
+	if (dmas & DMA_STAT_HALT) {
+		writel(~(DMA_STAT_HALT | DMA_STAT_ERR),
+				&lp->rx_dma_regs->dmas);
+
+		lp->dma_halt_cnt++;
+		rd->devcs = 0;
+		skb = lp->rx_skb[lp->rx_next_done];
+		rd->ca = CPHYSADDR(skb->data);
+		dma_cache_wback((u32)rd, sizeof(*rd));
+		korina_chain_rx(lp, rd);
+	}
+
+	return count;
+}
+
+static int korina_poll(struct napi_struct *napi, int budget)
+{
+	struct korina_private *lp =
+		container_of(napi, struct korina_private, napi);
+	struct net_device *dev = lp->dev;
+	int work_done;
+
+	work_done = korina_rx(dev, budget);
+	if (work_done < budget) {
+		netif_rx_complete(dev, napi);
+
+		writel(readl(&lp->rx_dma_regs->dmasm) &
+			~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
+			&lp->rx_dma_regs->dmasm);
+	}
+	return work_done;
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+static void korina_multicast_list(struct net_device *dev)
+{
+	struct korina_private *lp = netdev_priv(dev);
+	unsigned long flags;
+	struct dev_mc_list *dmi = dev->mc_list;
+	u32 recognise = ETH_ARC_AB;	/* always accept broadcasts */
+	int i;
+
+	/* Set promiscuous mode */
+	if (dev->flags & IFF_PROMISC)
+		recognise |= ETH_ARC_PRO;
+
+	else if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 4))
+		/* All multicast and broadcast */
+		recognise |= ETH_ARC_AM;
+
+	/* Build the hash table */
+	if (dev->mc_count > 4) {
+		u16 hash_table[4];
+		u32 crc;
+
+		for (i = 0; i < 4; i++)
+			hash_table[i] = 0;
+
+		for (i = 0; i < dev->mc_count; i++) {
+			char *addrs = dmi->dmi_addr;
+
+			dmi = dmi->next;
+
+			if (!(*addrs & 1))
+				continue;
+
+			crc = ether_crc_le(6, addrs);
+			crc >>= 26;
+			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
+		}
+		/* Accept filtered multicast */
+		recognise |= ETH_ARC_AFM;
+
+		/* Fill the MAC hash tables with their values */
+		writel((u32)(hash_table[1] << 16 | hash_table[0]),
+					&lp->eth_regs->ethhash0);
+		writel((u32)(hash_table[3] << 16 | hash_table[2]),
+					&lp->eth_regs->ethhash1);
+	}
+
+	spin_lock_irqsave(&lp->lock, flags);
+	writel(recognise, &lp->eth_regs->etharc);
+	spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+static void korina_tx(struct net_device *dev)
+{
+	struct korina_private *lp = netdev_priv(dev);
+	struct dma_desc *td = &lp->td_ring[lp->tx_next_done];
+	u32 devcs;
+	u32 dmas;
+
+	spin_lock(&lp->lock);
+
+	/* Process all desc that are done */
+	while (IS_DMA_FINISHED(td->control)) {
+		if (lp->tx_full == 1) {
+			netif_wake_queue(dev);
+			lp->tx_full = 0;
+		}
+
+		devcs = lp->td_ring[lp->tx_next_done].devcs;
+		if ((devcs & (ETH_TX_FD | ETH_TX_LD)) !=
+				(ETH_TX_FD | ETH_TX_LD)) {
+			dev->stats.tx_errors++;
+			dev->stats.tx_dropped++;
+
+			/* Should never happen */
+			printk(KERN_ERR DRV_NAME ": %s: split tx ignored\n",
+							dev->name);
+		} else if (devcs & ETH_TX_TOK) {
+			dev->stats.tx_packets++;
+			dev->stats.tx_bytes +=
+					lp->tx_skb[lp->tx_next_done]->len;
+		} else {
+			dev->stats.tx_errors++;
+			dev->stats.tx_dropped++;
+
+			/* Underflow */
+			if (devcs & ETH_TX_UND)
+				dev->stats.tx_fifo_errors++;
+
+			/* Oversized frame */
+			if (devcs & ETH_TX_OF)
+				dev->stats.tx_aborted_errors++;
+
+			/* Excessive deferrals */
+			if (devcs & ETH_TX_ED)
+				dev->stats.tx_carrier_errors++;
+
+			/* Collisions: medium busy */
+			if (devcs & ETH_TX_EC)
+				dev->stats.collisions++;
+
+			/* Late collision */
+			if (devcs & ETH_TX_LC)
+				dev->stats.tx_window_errors++;
+		}
+
+		/* We must always free the original skb */
+		if (lp->tx_skb[lp->tx_next_done]) {
+			dev_kfree_skb_any(lp->tx_skb[lp->tx_next_done]);
+			lp->tx_skb[lp->tx_next_done] = NULL;
+		}
+
+		lp->td_ring[lp->tx_next_done].control = DMA_DESC_IOF;
+		lp->td_ring[lp->tx_next_done].devcs = ETH_TX_FD | ETH_TX_LD;
+		lp->td_ring[lp->tx_next_done].link = 0;
+		lp->td_ring[lp->tx_next_done].ca = 0;
+		lp->tx_count--;
+
+		/* Go on to next transmission */
+		lp->tx_next_done = (lp->tx_next_done + 1) & KORINA_TDS_MASK;
+		td = &lp->td_ring[lp->tx_next_done];
+
+	}
+
+	/* Clear the DMA status register */
+	dmas = readl(&lp->tx_dma_regs->dmas);
+	writel(~dmas, &lp->tx_dma_regs->dmas);
+
+	writel(readl(&lp->tx_dma_regs->dmasm) &
+			~(DMA_STAT_FINI | DMA_STAT_ERR),
+			&lp->tx_dma_regs->dmasm);
+
+	spin_unlock(&lp->lock);
+}
+
+static irqreturn_t
+korina_tx_dma_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct korina_private *lp = netdev_priv(dev);
+	u32 dmas, dmasm;
+	irqreturn_t retval;
+
+	dmas = readl(&lp->tx_dma_regs->dmas);
+
+	if (dmas & (DMA_STAT_FINI | DMA_STAT_ERR)) {
+		korina_tx(dev);
+
+		dmasm = readl(&lp->tx_dma_regs->dmasm);
+		writel(dmasm | (DMA_STAT_FINI | DMA_STAT_ERR),
+				&lp->tx_dma_regs->dmasm);
+
+		if (lp->tx_chain_status == desc_filled &&
+			(readl(&(lp->tx_dma_regs->dmandptr)) == 0)) {
+			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
+				&(lp->tx_dma_regs->dmandptr));
+			lp->tx_chain_status = desc_empty;
+			lp->tx_chain_head = lp->tx_chain_tail;
+			dev->trans_start = jiffies;
+		}
+		if (dmas & DMA_STAT_ERR)
+			printk(KERN_ERR DRV_NAME ": %s: DMA error\n", dev->name);
+
+		retval = IRQ_HANDLED;
+	} else
+		retval = IRQ_NONE;
+
+	return retval;
+}
+
+
+static void korina_check_media(struct net_device *dev, unsigned int init_media)
+{
+	struct korina_private *lp = netdev_priv(dev);
+
+	mii_check_media(&lp->mii_if, 0, init_media);
+
+	if (lp->mii_if.full_duplex)
+		writel(readl(&lp->eth_regs->ethmac2) | ETH_MAC2_FD,
+						&lp->eth_regs->ethmac2);
+	else
+		writel(readl(&lp->eth_regs->ethmac2) & ~ETH_MAC2_FD,
+						&lp->eth_regs->ethmac2);
+}
+
+static void korina_set_carrier(struct mii_if_info *mii)
+{
+	if (mii->force_media) {
+		/* autoneg is off: Link is always assumed to be up */
+		if (!netif_carrier_ok(mii->dev))
+			netif_carrier_on(mii->dev);
+	} else  /* Let MII library update carrier status */
+		korina_check_media(mii->dev, 0);
+}
+
+static int korina_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct korina_private *lp = netdev_priv(dev);
+	struct mii_ioctl_data *data = if_mii(rq);
+	int rc;
+
+	if (!netif_running(dev))
+		return -EINVAL;
+	spin_lock_irq(&lp->lock);
+	rc = generic_mii_ioctl(&lp->mii_if, data, cmd, NULL);
+	spin_unlock_irq(&lp->lock);
+	korina_set_carrier(&lp->mii_if);
+
+	return rc;
+}
+
+/* ethtool helpers */
+static void netdev_get_drvinfo(struct net_device *dev,
+			struct ethtool_drvinfo *info)
+{
+	struct korina_private *lp = netdev_priv(dev);
+
+	strcpy(info->driver, DRV_NAME);
+	strcpy(info->version, DRV_VERSION);
+	strcpy(info->bus_info, lp->dev->name);
+}
+
+static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct korina_private *lp = netdev_priv(dev);
+	int rc;
+
+	spin_lock_irq(&lp->lock);
+	rc = mii_ethtool_gset(&lp->mii_if, cmd);
+	spin_unlock_irq(&lp->lock);
+
+	return rc;
+}
+
+static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct korina_private *lp = netdev_priv(dev);
+	int rc;
+
+	spin_lock_irq(&lp->lock);
+	rc = mii_ethtool_sset(&lp->mii_if, cmd);
+	spin_unlock_irq(&lp->lock);
+	korina_set_carrier(&lp->mii_if);
+
+	return rc;
+}
+
+static u32 netdev_get_link(struct net_device *dev)
+{
+	struct korina_private *lp = netdev_priv(dev);
+
+	return mii_link_ok(&lp->mii_if);
+}
+
+static struct ethtool_ops netdev_ethtool_ops = {
+	.get_drvinfo            = netdev_get_drvinfo,
+	.get_settings           = netdev_get_settings,
+	.set_settings           = netdev_set_settings,
+	.get_link               = netdev_get_link,
+};
+
+static void korina_alloc_ring(struct net_device *dev)
+{
+	struct korina_private *lp = netdev_priv(dev);
+	int i;
+
+	/* Initialize the transmit descriptors */
+	for (i = 0; i < KORINA_NUM_TDS; i++) {
+		lp->td_ring[i].control = DMA_DESC_IOF;
+		lp->td_ring[i].devcs = ETH_TX_FD | ETH_TX_LD;
+		lp->td_ring[i].ca = 0;
+		lp->td_ring[i].link = 0;
+	}
+	lp->tx_next_done = lp->tx_chain_head = lp->tx_chain_tail =
+			lp->tx_full = lp->tx_count = 0;
+	lp->tx_chain_status = desc_empty;
+
+	/* Initialize the receive descriptors */
+	for (i = 0; i < KORINA_NUM_RDS; i++) {
+		struct sk_buff *skb = lp->rx_skb[i];
+
+		skb = dev_alloc_skb(KORINA_RBSIZE + 2);
+		if (!skb)
+			break;
+		skb_reserve(skb, 2);
+		lp->rx_skb[i] = skb;
+		lp->rd_ring[i].control = DMA_DESC_IOD |
+				DMA_COUNT(KORINA_RBSIZE);
+		lp->rd_ring[i].devcs = 0;
+		lp->rd_ring[i].ca = CPHYSADDR(skb->data);
+		lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[i+1]);
+	}
+
+	/* loop back */
+	lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[0]);
+	lp->rx_next_done  = 0;
+
+	lp->rd_ring[i].control |= DMA_DESC_COD;
+	lp->rx_chain_head = 0;
+	lp->rx_chain_tail = 0;
+	lp->rx_chain_status = desc_empty;
+}
+
+static void korina_free_ring(struct net_device *dev)
+{
+	struct korina_private *lp = netdev_priv(dev);
+	int i;
+
+	for (i = 0; i < KORINA_NUM_RDS; i++) {
+		lp->rd_ring[i].control = 0;
+		if (lp->rx_skb[i])
+			dev_kfree_skb_any(lp->rx_skb[i]);
+		lp->rx_skb[i] = NULL;
+	}
+
+	for (i = 0; i < KORINA_NUM_TDS; i++) {
+		lp->td_ring[i].control = 0;
+		if (lp->tx_skb[i])
+			dev_kfree_skb_any(lp->tx_skb[i]);
+		lp->tx_skb[i] = NULL;
+	}
+}
+
+/*
+ * Initialize the RC32434 ethernet controller.
+ */
+static int korina_init(struct net_device *dev)
+{
+	struct korina_private *lp = netdev_priv(dev);
+
+	/* Disable DMA */
+	korina_abort_tx(dev);
+	korina_abort_rx(dev);
+
+	/* reset ethernet logic */
+	writel(0, &lp->eth_regs->ethintfc);
+	while ((readl(&lp->eth_regs->ethintfc) & ETH_INT_FC_RIP))
+		dev->trans_start = jiffies;
+
+	/* Enable Ethernet Interface */
+	writel(ETH_INT_FC_EN, &lp->eth_regs->ethintfc);
+
+	/* Allocate rings */
+	korina_alloc_ring(dev);
+
+	writel(0, &lp->rx_dma_regs->dmas);
+	/* Start Rx DMA */
+	korina_start_rx(lp, &lp->rd_ring[0]);
+
+	writel(readl(&lp->tx_dma_regs->dmasm) &
+			~(DMA_STAT_FINI | DMA_STAT_ERR),
+			&lp->tx_dma_regs->dmasm);
+	writel(readl(&lp->rx_dma_regs->dmasm) &
+			~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
+			&lp->rx_dma_regs->dmasm);
+
+	/* Accept only packets destined for this Ethernet device address */
+	writel(ETH_ARC_AB, &lp->eth_regs->etharc);
+
+	/* Set all Ether station address registers to their initial values */
+	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal0);
+	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah0);
+
+	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal1);
+	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah1);
+
+	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal2);
+	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah2);
+
+	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal3);
+	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah3);
+
+	/* Frame Length Checking, Pad Enable, CRC Enable, Full Duplex set */
+	writel(ETH_MAC2_PE | ETH_MAC2_CEN | ETH_MAC2_FD,
+			&lp->eth_regs->ethmac2);
+
+	/* Back to back inter-packet-gap */
+	writel(0x15, &lp->eth_regs->ethipgt);
+	/* Non - Back to back inter-packet-gap */
+	writel(0x12, &lp->eth_regs->ethipgr);
+
+	/* Management Clock Prescaler Divisor
+	 * Clock independent setting */
+	writel(((idt_cpu_freq) / MII_CLOCK + 1) & ~1,
+		       &lp->eth_regs->ethmcp);
+
+	/* Don't start transmitting until the fifo contains 48 bytes */
+	writel(48, &lp->eth_regs->ethfifott);
+
+	writel(ETH_MAC1_RE, &lp->eth_regs->ethmac1);
+
+	napi_enable(&lp->napi);
+	netif_start_queue(dev);
+
+	return 0;
+}
+
+/*
+ * Restart the RC32434 ethernet controller.
+ * FIXME: check the return status where we call it
+ */
+static int korina_restart(struct net_device *dev)
+{
+	struct korina_private *lp = netdev_priv(dev);
+	int ret = 0;
+
+	/*
+	 * Disable interrupts
+	 */
+	disable_irq(lp->rx_irq);
+	disable_irq(lp->tx_irq);
+	disable_irq(lp->ovr_irq);
+	disable_irq(lp->und_irq);
+
+	writel(readl(&lp->tx_dma_regs->dmasm) |
+				DMA_STAT_FINI | DMA_STAT_ERR,
+				&lp->tx_dma_regs->dmasm);
+	writel(readl(&lp->rx_dma_regs->dmasm) |
+				DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
+				&lp->rx_dma_regs->dmasm);
+
+	korina_free_ring(dev);
+
+	ret = korina_init(dev);
+	if (ret < 0) {
+		printk(KERN_ERR DRV_NAME ": %s: cannot restart device\n",
+								dev->name);
+		return ret;
+	}
+	korina_multicast_list(dev);
+
+	enable_irq(lp->und_irq);
+	enable_irq(lp->ovr_irq);
+	enable_irq(lp->tx_irq);
+	enable_irq(lp->rx_irq);
+
+	return ret;
+}
+
+static void korina_clear_and_restart(struct net_device *dev, u32 value)
+{
+	struct korina_private *lp = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+	writel(value, &lp->eth_regs->ethintfc);
+	korina_restart(dev);
+}
+
+/* Ethernet Tx Underflow interrupt */
+static irqreturn_t korina_und_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct korina_private *lp = netdev_priv(dev);
+	unsigned int und;
+
+	spin_lock(&lp->lock);
+
+	und = readl(&lp->eth_regs->ethintfc);
+
+	if (und & ETH_INT_FC_UND)
+		korina_clear_and_restart(dev, und & ~ETH_INT_FC_UND);
+
+	spin_unlock(&lp->lock);
+
+	return IRQ_HANDLED;
+}
+
+static void korina_tx_timeout(struct net_device *dev)
+{
+	struct korina_private *lp = netdev_priv(dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&lp->lock, flags);
+	korina_restart(dev);
+	spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+/* Ethernet Rx Overflow interrupt */
+static irqreturn_t
+korina_ovr_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct korina_private *lp = netdev_priv(dev);
+	unsigned int ovr;
+
+	spin_lock(&lp->lock);
+	ovr = readl(&lp->eth_regs->ethintfc);
+
+	if (ovr & ETH_INT_FC_OVR)
+		korina_clear_and_restart(dev, ovr & ~ETH_INT_FC_OVR);
+
+	spin_unlock(&lp->lock);
+
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void korina_poll_controller(struct net_device *dev)
+{
+	disable_irq(dev->irq);
+	korina_tx_dma_interrupt(dev->irq, dev);
+	enable_irq(dev->irq);
+}
+#endif
+
+static int korina_open(struct net_device *dev)
+{
+	struct korina_private *lp = netdev_priv(dev);
+	int ret = 0;
+
+	/* Initialize */
+	ret = korina_init(dev);
+	if (ret < 0) {
+		printk(KERN_ERR DRV_NAME ": %s: cannot open device\n", dev->name);
+		goto out;
+	}
+
+	/* Install the interrupt handlers that handle
+	 * the Done, Finished, Ovr and Und events */
+	ret = request_irq(lp->rx_irq, &korina_rx_dma_interrupt,
+		IRQF_SHARED | IRQF_DISABLED, "Korina ethernet Rx", dev);
+	if (ret < 0) {
+		printk(KERN_ERR DRV_NAME ": %s: unable to get Rx DMA IRQ %d\n",
+		    dev->name, lp->rx_irq);
+		goto err_release;
+	}
+	ret = request_irq(lp->tx_irq, &korina_tx_dma_interrupt,
+		IRQF_SHARED | IRQF_DISABLED, "Korina ethernet Tx", dev);
+	if (ret < 0) {
+		printk(KERN_ERR DRV_NAME ": %s: unable to get Tx DMA IRQ %d\n",
+		    dev->name, lp->tx_irq);
+		goto err_free_rx_irq;
+	}
+
+	/* Install handler for overrun error. */
+	ret = request_irq(lp->ovr_irq, &korina_ovr_interrupt,
+			IRQF_SHARED | IRQF_DISABLED, "Ethernet Overflow", dev);
+	if (ret < 0) {
+		printk(KERN_ERR DRV_NAME ": %s: unable to get OVR IRQ %d\n",
+		    dev->name, lp->ovr_irq);
+		goto err_free_tx_irq;
+	}
+
+	/* Install handler for underflow error. */
+	ret = request_irq(lp->und_irq, &korina_und_interrupt,
+			IRQF_SHARED | IRQF_DISABLED, "Ethernet Underflow", dev);
+	if (ret < 0) {
+		printk(KERN_ERR DRV_NAME ": %s: unable to get UND IRQ %d\n",
+		    dev->name, lp->und_irq);
+		goto err_free_ovr_irq;
+	}
+
+out:
+	return ret;
+
+err_free_ovr_irq:
+	free_irq(lp->ovr_irq, dev);
+err_free_tx_irq:
+	free_irq(lp->tx_irq, dev);
+err_free_rx_irq:
+	free_irq(lp->rx_irq, dev);
+err_release:
+	korina_free_ring(dev);
+	goto out;
+}
+
+static int korina_close(struct net_device *dev)
+{
+	struct korina_private *lp = netdev_priv(dev);
+	u32 tmp;
+
+	/* Disable interrupts */
+	disable_irq(lp->rx_irq);
+	disable_irq(lp->tx_irq);
+	disable_irq(lp->ovr_irq);
+	disable_irq(lp->und_irq);
+
+	korina_abort_tx(dev);
+	tmp = readl(&lp->tx_dma_regs->dmasm);
+	tmp = tmp | DMA_STAT_FINI | DMA_STAT_ERR;
+	writel(tmp, &lp->tx_dma_regs->dmasm);
+
+	korina_abort_rx(dev);
+	tmp = readl(&lp->rx_dma_regs->dmasm);
+	tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
+	writel(tmp, &lp->rx_dma_regs->dmasm);
+
+	korina_free_ring(dev);
+
+	free_irq(lp->rx_irq, dev);
+	free_irq(lp->tx_irq, dev);
+	free_irq(lp->ovr_irq, dev);
+	free_irq(lp->und_irq, dev);
+
+	return 0;
+}
+
+static int korina_probe(struct platform_device *pdev)
+{
+	struct korina_device *bif = platform_get_drvdata(pdev);
+	struct korina_private *lp;
+	struct net_device *dev;
+	struct resource *r;
+	int retval, err;
+
+	dev = alloc_etherdev(sizeof(struct korina_private));
+	if (!dev) {
+		printk(KERN_ERR DRV_NAME ": alloc_etherdev failed\n");
+		return -ENOMEM;
+	}
+	SET_NETDEV_DEV(dev, &pdev->dev);
+	platform_set_drvdata(pdev, dev);
+	lp = netdev_priv(dev);
+
+	bif->dev = dev;
+	memcpy(dev->dev_addr, bif->mac, 6);
+
+	lp->rx_irq = platform_get_irq_byname(pdev, "korina_rx");
+	lp->tx_irq = platform_get_irq_byname(pdev, "korina_tx");
+	lp->ovr_irq = platform_get_irq_byname(pdev, "korina_ovr");
+	lp->und_irq = platform_get_irq_byname(pdev, "korina_und");
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs");
+	dev->base_addr = r->start;
+	lp->eth_regs = ioremap_nocache(r->start, r->end - r->start);
+	if (!lp->eth_regs) {
+		printk(KERN_ERR DRV_NAME ": cannot remap registers\n");
+		retval = -ENXIO;
+		goto probe_err_out;
+	}
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx");
+	lp->rx_dma_regs = ioremap_nocache(r->start, r->end - r->start);
+	if (!lp->rx_dma_regs) {
+		printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n");
+		retval = -ENXIO;
+		goto probe_err_dma_rx;
+	}
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx");
+	lp->tx_dma_regs = ioremap_nocache(r->start, r->end - r->start);
+	if (!lp->tx_dma_regs) {
+		printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n");
+		retval = -ENXIO;
+		goto probe_err_dma_tx;
+	}
+
+	lp->td_ring = kmalloc(TD_RING_SIZE + RD_RING_SIZE, GFP_KERNEL);
+	if (!lp->td_ring) {
+		printk(KERN_ERR DRV_NAME ": cannot allocate descriptors\n");
+		retval = -ENOMEM;
+		goto probe_err_td_ring;
+	}
+
+	dma_cache_inv((unsigned long)(lp->td_ring),
+			TD_RING_SIZE + RD_RING_SIZE);
+
+	/* now convert TD_RING pointer to KSEG1 */
+	lp->td_ring = (struct dma_desc *)KSEG1ADDR(lp->td_ring);
+	lp->rd_ring = &lp->td_ring[KORINA_NUM_TDS];
+
+	spin_lock_init(&lp->lock);
+	/* just use the rx dma irq */
+	dev->irq = lp->rx_irq;
+	lp->dev = dev;
+
+	dev->open = korina_open;
+	dev->stop = korina_close;
+	dev->hard_start_xmit = korina_send_packet;
+	dev->set_multicast_list = &korina_multicast_list;
+	dev->ethtool_ops = &netdev_ethtool_ops;
+	dev->tx_timeout = korina_tx_timeout;
+	dev->watchdog_timeo = TX_TIMEOUT;
+	dev->do_ioctl = &korina_ioctl;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	dev->poll_controller = korina_poll_controller;
+#endif
+	netif_napi_add(dev, &lp->napi, korina_poll, 64);
+
+	lp->phy_addr = (((lp->rx_irq == 0x2c? 1:0) << 8) | 0x05);
+	lp->mii_if.dev = dev;
+	lp->mii_if.mdio_read = mdio_read;
+	lp->mii_if.mdio_write = mdio_write;
+	lp->mii_if.phy_id = lp->phy_addr;
+	lp->mii_if.phy_id_mask = 0x1f;
+	lp->mii_if.reg_num_mask = 0x1f;
+
+	err = register_netdev(dev);
+	if (err) {
+		printk(KERN_ERR DRV_NAME
+			": cannot register net device %d\n", err);
+		retval = -EINVAL;
+		goto probe_err_register;
+	}
+	return 0;
+
+probe_err_register:
+	kfree(lp->td_ring);
+probe_err_td_ring:
+	iounmap(lp->tx_dma_regs);
+probe_err_dma_tx:
+	iounmap(lp->rx_dma_regs);
+probe_err_dma_rx:
+	iounmap(lp->eth_regs);
+probe_err_out:
+	free_netdev(dev);
+	return retval;
+}
+
+static int korina_remove(struct platform_device *pdev)
+{
+	struct korina_device *bif = platform_get_drvdata(pdev);
+	struct korina_private *lp = netdev_priv(bif->dev);
+
+	if (lp->eth_regs)
+		iounmap(lp->eth_regs);
+	if (lp->rx_dma_regs)
+		iounmap(lp->rx_dma_regs);
+	if (lp->tx_dma_regs)
+		iounmap(lp->tx_dma_regs);
+
+	platform_set_drvdata(pdev, NULL);
+	unregister_netdev(bif->dev);
+	free_netdev(bif->dev);
+
+	return 0;
+}
+
+static struct platform_driver korina_driver = {
+	.driver.name = "korina",
+	.probe = korina_probe,
+	.remove = korina_remove,
+};
+
+static int __init korina_init_module(void)
+{
+	return platform_driver_register(&korina_driver);
+}
+
+static void korina_cleanup_module(void)
+{
+	return platform_driver_unregister(&korina_driver);
+}
+
+module_init(korina_init_module);
+module_exit(korina_cleanup_module);
+
+MODULE_AUTHOR("Philip Rischel <rischelp@idt.com>");
+MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
+MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_DESCRIPTION("IDT RC32434 (Korina) Ethernet driver");
+MODULE_LICENSE("GPL");
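For reference, korina_probe() above pulls its register windows and IRQs out of named platform resources and reads its board data from the device's drvdata. A minimal sketch of what the matching board-support code might look like, assuming a korina_device layout with just the mac[] and dev members used above; the addresses, IRQ numbers and MAC below are placeholders, not real RB532 values, and this fragment is not part of the patch:

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Hypothetical board file fragment -- the resource names match what the
 * probe looks up, everything else is illustrative only. */
static struct korina_device korina_dev0_data = {
	.mac = { 0x00, 0x0b, 0x6a, 0x00, 0x00, 0x01 },
};

static struct resource korina_dev0_res[] = {
	{ .name = "korina_regs",   .start = 0x18060000, .end = 0x180600ff,
	  .flags = IORESOURCE_MEM },
	{ .name = "korina_dma_rx", .start = 0x18040000, .end = 0x18040013,
	  .flags = IORESOURCE_MEM },
	{ .name = "korina_dma_tx", .start = 0x18040040, .end = 0x18040053,
	  .flags = IORESOURCE_MEM },
	{ .name = "korina_rx",  .start = 40, .end = 40, .flags = IORESOURCE_IRQ },
	{ .name = "korina_tx",  .start = 41, .end = 41, .flags = IORESOURCE_IRQ },
	{ .name = "korina_ovr", .start = 42, .end = 42, .flags = IORESOURCE_IRQ },
	{ .name = "korina_und", .start = 43, .end = 43, .flags = IORESOURCE_IRQ },
};

static struct platform_device korina_dev0 = {
	.name		= "korina",
	.id		= -1,
	.resource	= korina_dev0_res,
	.num_resources	= ARRAY_SIZE(korina_dev0_res),
};

static int __init board_register_korina(void)
{
	/* The probe reads its board data via platform_get_drvdata(),
	 * so hand it over before registering the device. */
	platform_set_drvdata(&korina_dev0, &korina_dev0_data);
	return platform_device_register(&korina_dev0);
}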

+ 1 - 1
drivers/net/loopback.c

@@ -258,7 +258,7 @@ static __net_init int loopback_net_init(struct net *net)
 	if (!dev)
 		goto out;
 
-	dev->nd_net = net;
+	dev_net_set(dev, net);
 	err = register_netdev(dev);
 	if (err)
 		goto out_free_netdev;
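This one-liner is part of the tree-wide move from touching dev->nd_net directly to the dev_net()/dev_net_set() accessors (the macvlan hunk below shows the read side). A minimal illustration of the pattern, not taken from any particular driver:

#include <linux/netdevice.h>

/* Illustrative only: set and read a device's network namespace through
 * the accessors rather than dereferencing dev->nd_net. */
static void example_assign_netns(struct net_device *dev, struct net *net)
{
	dev_net_set(dev, net);		/* was: dev->nd_net = net; */
}

static struct net *example_read_netns(struct net_device *dev)
{
	return dev_net(dev);		/* was: dev->nd_net */
}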

+ 1 - 1
drivers/net/macb.c

@@ -246,7 +246,7 @@ static int macb_mii_init(struct macb *bp)
 	bp->mii_bus.read = &macb_mdio_read;
 	bp->mii_bus.write = &macb_mdio_write;
 	bp->mii_bus.reset = &macb_mdio_reset;
-	bp->mii_bus.id = bp->pdev->id;
+	snprintf(bp->mii_bus.id, MII_BUS_ID_SIZE, "%x", bp->pdev->id);
 	bp->mii_bus.priv = bp;
 	bp->mii_bus.dev = &bp->dev->dev;
 	pdata = bp->pdev->dev.platform_data;
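With mii_bus.id now a fixed-size string rather than an integer, code that refers to a PHY on the bus has to build the textual "<bus-id>:<addr>" form. A rough sketch of that, assuming a PHY at address phy_addr; the helper name and buffer handling are illustrative, and PHY_ID_FMT is assumed to be the "%s:%02x" format from <linux/phy.h> of this period:

#include <linux/phy.h>
#include <linux/platform_device.h>

/* Illustrative helper: format the bus id the same way macb_mii_init()
 * now does, then derive the "<bus-id>:<addr>" PHY identifier from it. */
static void example_phy_id(struct platform_device *pdev, int phy_addr,
			   char *phy_id, size_t len)
{
	char bus_id[MII_BUS_ID_SIZE];

	snprintf(bus_id, sizeof(bus_id), "%x", pdev->id);
	snprintf(phy_id, len, PHY_ID_FMT, bus_id, phy_addr);
}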

+ 1 - 1
drivers/net/macvlan.c

@@ -402,7 +402,7 @@ static int macvlan_newlink(struct net_device *dev,
 	if (!tb[IFLA_LINK])
 		return -EINVAL;
 
-	lowerdev = __dev_get_by_index(dev->nd_net, nla_get_u32(tb[IFLA_LINK]));
+	lowerdev = __dev_get_by_index(dev_net(dev), nla_get_u32(tb[IFLA_LINK]));
 	if (lowerdev == NULL)
 		return -ENODEV;
 

+ 231 - 244
drivers/net/mv643xx_eth.c

@@ -3,7 +3,8 @@
  * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
  *
  * Based on the 64360 driver from:
- * Copyright (C) 2002 rabeeh@galileo.co.il
+ * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
+ *		      Rabeeh Khoury <rabeeh@marvell.com>
  *
  * Copyright (C) 2003 PMC-Sierra, Inc.,
  *	written by Manish Lachwani
@@ -16,6 +17,9 @@
  * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
  *				     <sjhill@realitydiluted.com>
  *
+ * Copyright (C) 2007-2008 Marvell Semiconductor
+ *			   Lennert Buytenhek <buytenh@marvell.com>
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version 2
@@ -63,20 +67,6 @@
 #define MV643XX_TX_FAST_REFILL
 #undef	MV643XX_COAL
 
-/*
- * Number of RX / TX descriptors on RX / TX rings.
- * Note that allocating RX descriptors is done by allocating the RX
- * ring AND a preallocated RX buffers (skb's) for each descriptor.
- * The TX descriptors only allocates the TX descriptors ring,
- * with no pre allocated TX buffers (skb's are allocated by higher layers.
- */
-
-/* Default TX ring size is 1000 descriptors */
-#define MV643XX_DEFAULT_TX_QUEUE_SIZE 1000
-
-/* Default RX ring size is 400 descriptors */
-#define MV643XX_DEFAULT_RX_QUEUE_SIZE 400
-
 #define MV643XX_TX_COAL 100
 #ifdef MV643XX_COAL
 #define MV643XX_RX_COAL 100
@@ -434,14 +424,6 @@ typedef enum _eth_func_ret_status {
 	ETH_QUEUE_LAST_RESOURCE	/* Ring resources about to exhaust.	*/
 } ETH_FUNC_RET_STATUS;
 
-typedef enum _eth_target {
-	ETH_TARGET_DRAM,
-	ETH_TARGET_DEVICE,
-	ETH_TARGET_CBS,
-	ETH_TARGET_PCI0,
-	ETH_TARGET_PCI1
-} ETH_TARGET;
-
 /* These are for big-endian machines.  Little endian needs different
  * definitions.
  */
@@ -586,43 +568,44 @@ struct mv643xx_private {
 
 /* Static function declarations */
 static void eth_port_init(struct mv643xx_private *mp);
-static void eth_port_reset(unsigned int eth_port_num);
+static void eth_port_reset(struct mv643xx_private *mp);
 static void eth_port_start(struct net_device *dev);
 
-static void ethernet_phy_reset(unsigned int eth_port_num);
+static void ethernet_phy_reset(struct mv643xx_private *mp);
 
-static void eth_port_write_smi_reg(unsigned int eth_port_num,
+static void eth_port_write_smi_reg(struct mv643xx_private *mp,
 				   unsigned int phy_reg, unsigned int value);
 
-static void eth_port_read_smi_reg(unsigned int eth_port_num,
+static void eth_port_read_smi_reg(struct mv643xx_private *mp,
 				  unsigned int phy_reg, unsigned int *value);
 
-static void eth_clear_mib_counters(unsigned int eth_port_num);
+static void eth_clear_mib_counters(struct mv643xx_private *mp);
 
 static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
 					    struct pkt_info *p_pkt_info);
 static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
 					      struct pkt_info *p_pkt_info);
 
-static void eth_port_uc_addr_get(unsigned int port_num, unsigned char *p_addr);
-static void eth_port_uc_addr_set(unsigned int port_num, unsigned char *p_addr);
+static void eth_port_uc_addr_get(struct mv643xx_private *mp,
+				 unsigned char *p_addr);
+static void eth_port_uc_addr_set(struct mv643xx_private *mp,
+				 unsigned char *p_addr);
 static void eth_port_set_multicast_list(struct net_device *);
-static void mv643xx_eth_port_enable_tx(unsigned int port_num,
+static void mv643xx_eth_port_enable_tx(struct mv643xx_private *mp,
 						unsigned int queues);
-static void mv643xx_eth_port_enable_rx(unsigned int port_num,
+static void mv643xx_eth_port_enable_rx(struct mv643xx_private *mp,
 						unsigned int queues);
-static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num);
-static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num);
+static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_private *mp);
+static unsigned int mv643xx_eth_port_disable_rx(struct mv643xx_private *mp);
 static int mv643xx_eth_open(struct net_device *);
 static int mv643xx_eth_stop(struct net_device *);
-static int mv643xx_eth_change_mtu(struct net_device *, int);
-static void eth_port_init_mac_tables(unsigned int eth_port_num);
+static void eth_port_init_mac_tables(struct mv643xx_private *mp);
 #ifdef MV643XX_NAPI
 static int mv643xx_poll(struct napi_struct *napi, int budget);
 #endif
-static int ethernet_phy_get(unsigned int eth_port_num);
-static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr);
-static int ethernet_phy_detect(unsigned int eth_port_num);
+static int ethernet_phy_get(struct mv643xx_private *mp);
+static void ethernet_phy_set(struct mv643xx_private *mp, int phy_addr);
+static int ethernet_phy_detect(struct mv643xx_private *mp);
 static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location);
 static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val);
 static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
@@ -636,12 +619,12 @@ static void __iomem *mv643xx_eth_base;
 /* used to protect SMI_REG, which is shared across ports */
 static DEFINE_SPINLOCK(mv643xx_eth_phy_lock);
 
-static inline u32 mv_read(int offset)
+static inline u32 rdl(struct mv643xx_private *mp, int offset)
 {
 	return readl(mv643xx_eth_base + offset);
 }
 
-static inline void mv_write(int offset, u32 data)
+static inline void wrl(struct mv643xx_private *mp, int offset, u32 data)
 {
 	writel(data, mv643xx_eth_base + offset);
 }
@@ -659,18 +642,19 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
 		return -EINVAL;
 
 	dev->mtu = new_mtu;
+	if (!netif_running(dev))
+		return 0;
+
 	/*
-	 * Stop then re-open the interface. This will allocate RX skb's with
-	 * the new MTU.
-	 * There is a possible danger that the open will not successed, due
-	 * to memory is full, which might fail the open function.
+	 * Stop and then re-open the interface. This will allocate RX
+	 * skbs of the new MTU.
+	 * There is a possible danger that the open will not succeed,
+	 * due to memory being full, which might fail the open function.
 	 */
-	if (netif_running(dev)) {
-		mv643xx_eth_stop(dev);
-		if (mv643xx_eth_open(dev))
-			printk(KERN_ERR
-				"%s: Fatal error on opening device\n",
-				dev->name);
+	mv643xx_eth_stop(dev);
+	if (mv643xx_eth_open(dev)) {
+		printk(KERN_ERR "%s: Fatal error on opening device\n",
+			dev->name);
 	}
 
 	return 0;
@@ -748,10 +732,9 @@ static inline void mv643xx_eth_rx_refill_descs_timer_wrapper(unsigned long data)
 static void mv643xx_eth_update_mac_address(struct net_device *dev)
 {
 	struct mv643xx_private *mp = netdev_priv(dev);
-	unsigned int port_num = mp->port_num;
 
-	eth_port_init_mac_tables(port_num);
-	eth_port_uc_addr_set(port_num, dev->dev_addr);
+	eth_port_init_mac_tables(mp);
+	eth_port_uc_addr_set(mp, dev->dev_addr);
 }
 
 /*
@@ -767,12 +750,12 @@ static void mv643xx_eth_set_rx_mode(struct net_device *dev)
 	struct mv643xx_private *mp = netdev_priv(dev);
 	u32 config_reg;
 
-	config_reg = mv_read(PORT_CONFIG_REG(mp->port_num));
+	config_reg = rdl(mp, PORT_CONFIG_REG(mp->port_num));
 	if (dev->flags & IFF_PROMISC)
 		config_reg |= (u32) UNICAST_PROMISCUOUS_MODE;
 	else
 		config_reg &= ~(u32) UNICAST_PROMISCUOUS_MODE;
-	mv_write(PORT_CONFIG_REG(mp->port_num), config_reg);
+	wrl(mp, PORT_CONFIG_REG(mp->port_num), config_reg);
 
 	eth_port_set_multicast_list(dev);
 }
@@ -826,14 +809,14 @@ static void mv643xx_eth_tx_timeout_task(struct work_struct *ugly)
 {
 	struct mv643xx_private *mp = container_of(ugly, struct mv643xx_private,
 						  tx_timeout_task);
-	struct net_device *dev = mp->mii.dev; /* yuck */
+	struct net_device *dev = mp->dev;
 
 	if (!netif_running(dev))
 		return;
 
 	netif_stop_queue(dev);
 
-	eth_port_reset(mp->port_num);
+	eth_port_reset(mp);
 	eth_port_start(dev);
 
 	if (mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB)
@@ -845,7 +828,7 @@ static void mv643xx_eth_tx_timeout_task(struct work_struct *ugly)
  *
  * If force is non-zero, frees uncompleted descriptors as well
  */
-int mv643xx_eth_free_tx_descs(struct net_device *dev, int force)
+static int mv643xx_eth_free_tx_descs(struct net_device *dev, int force)
 {
 	struct mv643xx_private *mp = netdev_priv(dev);
 	struct eth_tx_desc *desc;
@@ -1008,7 +991,7 @@ static void mv643xx_eth_update_pscr(struct net_device *dev,
 	u32 o_pscr, n_pscr;
 	unsigned int queues;
 
-	o_pscr = mv_read(PORT_SERIAL_CONTROL_REG(port_num));
+	o_pscr = rdl(mp, PORT_SERIAL_CONTROL_REG(port_num));
 	n_pscr = o_pscr;
 
 	/* clear speed, duplex and rx buffer size fields */
@@ -1031,16 +1014,16 @@ static void mv643xx_eth_update_pscr(struct net_device *dev,
 
 	if (n_pscr != o_pscr) {
 		if ((o_pscr & SERIAL_PORT_ENABLE) == 0)
-			mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
+			wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
 		else {
-			queues = mv643xx_eth_port_disable_tx(port_num);
+			queues = mv643xx_eth_port_disable_tx(mp);
 
 			o_pscr &= ~SERIAL_PORT_ENABLE;
-			mv_write(PORT_SERIAL_CONTROL_REG(port_num), o_pscr);
-			mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
-			mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
+			wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), o_pscr);
+			wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
+			wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
 			if (queues)
-				mv643xx_eth_port_enable_tx(port_num, queues);
+				mv643xx_eth_port_enable_tx(mp, queues);
 		}
 	}
 }
@@ -1064,13 +1047,13 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
 	unsigned int port_num = mp->port_num;
 
 	/* Read interrupt cause registers */
-	eth_int_cause = mv_read(INTERRUPT_CAUSE_REG(port_num)) &
+	eth_int_cause = rdl(mp, INTERRUPT_CAUSE_REG(port_num)) &
 						ETH_INT_UNMASK_ALL;
 	if (eth_int_cause & ETH_INT_CAUSE_EXT) {
-		eth_int_cause_ext = mv_read(
+		eth_int_cause_ext = rdl(mp,
 			INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
 						ETH_INT_UNMASK_ALL_EXT;
-		mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num),
+		wrl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num),
 							~eth_int_cause_ext);
 	}
 
@@ -1081,8 +1064,7 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
 		if (mii_link_ok(&mp->mii)) {
 			mii_ethtool_gset(&mp->mii, &cmd);
 			mv643xx_eth_update_pscr(dev, &cmd);
-			mv643xx_eth_port_enable_tx(port_num,
-						   ETH_TX_QUEUES_ENABLED);
+			mv643xx_eth_port_enable_tx(mp, ETH_TX_QUEUES_ENABLED);
 			if (!netif_carrier_ok(dev)) {
 				netif_carrier_on(dev);
 				if (mp->tx_ring_size - mp->tx_desc_count >=
@@ -1098,10 +1080,10 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
 #ifdef MV643XX_NAPI
 	if (eth_int_cause & ETH_INT_CAUSE_RX) {
 		/* schedule the NAPI poll routine to maintain port */
-		mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
+		wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
 
 		/* wait for previous write to complete */
-		mv_read(INTERRUPT_MASK_REG(port_num));
+		rdl(mp, INTERRUPT_MASK_REG(port_num));
 
 		netif_rx_schedule(dev, &mp->napi);
 	}
@@ -1136,7 +1118,7 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
  *	, and the required delay of the interrupt in usec.
  *
  * INPUT:
- *	unsigned int eth_port_num	Ethernet port number
+ *	struct mv643xx_private *mp	Ethernet port
  *	unsigned int t_clk		t_clk of the MV-643xx chip in HZ units
  *	unsigned int delay		Delay in usec
  *
@@ -1147,15 +1129,16 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
  *	The interrupt coalescing value set in the gigE port.
  *
  */
-static unsigned int eth_port_set_rx_coal(unsigned int eth_port_num,
+static unsigned int eth_port_set_rx_coal(struct mv643xx_private *mp,
 					unsigned int t_clk, unsigned int delay)
 {
+	unsigned int port_num = mp->port_num;
 	unsigned int coal = ((t_clk / 1000000) * delay) / 64;
 
 	/* Set RX Coalescing mechanism */
-	mv_write(SDMA_CONFIG_REG(eth_port_num),
+	wrl(mp, SDMA_CONFIG_REG(port_num),
 		((coal & 0x3fff) << 8) |
-		(mv_read(SDMA_CONFIG_REG(eth_port_num))
+		(rdl(mp, SDMA_CONFIG_REG(port_num))
 			& 0xffc000ff));
 
 	return coal;
@@ -1174,7 +1157,7 @@ static unsigned int eth_port_set_rx_coal(unsigned int eth_port_num,
  *	MV-643xx chip and the required delay in the interrupt in uSec
  *
  * INPUT:
- *	unsigned int eth_port_num	Ethernet port number
+ *	struct mv643xx_private *mp	Ethernet port
  *	unsigned int t_clk		t_clk of the MV-643xx chip in HZ units
  *	unsigned int delay		Delay in uSeconds
  *
@@ -1185,13 +1168,14 @@ static unsigned int eth_port_set_rx_coal(unsigned int eth_port_num,
  *	The interrupt coalescing value set in the gigE port.
  *
  */
-static unsigned int eth_port_set_tx_coal(unsigned int eth_port_num,
+static unsigned int eth_port_set_tx_coal(struct mv643xx_private *mp,
 					unsigned int t_clk, unsigned int delay)
 {
-	unsigned int coal;
-	coal = ((t_clk / 1000000) * delay) / 64;
+	unsigned int coal = ((t_clk / 1000000) * delay) / 64;
+
 	/* Set TX Coalescing mechanism */
-	mv_write(TX_FIFO_URGENT_THRESHOLD_REG(eth_port_num), coal << 4);
+	wrl(mp, TX_FIFO_URGENT_THRESHOLD_REG(mp->port_num), coal << 4);
+
 	return coal;
 }
 
@@ -1327,16 +1311,15 @@ static int mv643xx_eth_open(struct net_device *dev)
 	int err;
 
 	/* Clear any pending ethernet port interrupts */
-	mv_write(INTERRUPT_CAUSE_REG(port_num), 0);
-	mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
+	wrl(mp, INTERRUPT_CAUSE_REG(port_num), 0);
+	wrl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
 	/* wait for previous write to complete */
-	mv_read (INTERRUPT_CAUSE_EXTEND_REG(port_num));
+	rdl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num));
 
 	err = request_irq(dev->irq, mv643xx_eth_int_handler,
 			IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
 	if (err) {
-		printk(KERN_ERR "Can not assign IRQ number to MV643XX_eth%d\n",
-								port_num);
+		printk(KERN_ERR "%s: Can not assign IRQ\n", dev->name);
 		return -EAGAIN;
 	}
 
@@ -1430,17 +1413,17 @@ static int mv643xx_eth_open(struct net_device *dev)
 
 #ifdef MV643XX_COAL
 	mp->rx_int_coal =
-		eth_port_set_rx_coal(port_num, 133000000, MV643XX_RX_COAL);
+		eth_port_set_rx_coal(mp, 133000000, MV643XX_RX_COAL);
 #endif
 
 	mp->tx_int_coal =
-		eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL);
+		eth_port_set_tx_coal(mp, 133000000, MV643XX_TX_COAL);
 
 	/* Unmask phy and link status changes interrupts */
-	mv_write(INTERRUPT_EXTEND_MASK_REG(port_num), ETH_INT_UNMASK_ALL_EXT);
+	wrl(mp, INTERRUPT_EXTEND_MASK_REG(port_num), ETH_INT_UNMASK_ALL_EXT);
 
 	/* Unmask RX buffer and TX end interrupt */
-	mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
+	wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
 
 	return 0;
 
@@ -1459,7 +1442,7 @@ static void mv643xx_eth_free_tx_rings(struct net_device *dev)
 	struct mv643xx_private *mp = netdev_priv(dev);
 
 	/* Stop Tx Queues */
-	mv643xx_eth_port_disable_tx(mp->port_num);
+	mv643xx_eth_port_disable_tx(mp);
 
 	/* Free outstanding skb's on TX ring */
 	mv643xx_eth_free_all_tx_descs(dev);
@@ -1477,11 +1460,10 @@ static void mv643xx_eth_free_tx_rings(struct net_device *dev)
 static void mv643xx_eth_free_rx_rings(struct net_device *dev)
 {
 	struct mv643xx_private *mp = netdev_priv(dev);
-	unsigned int port_num = mp->port_num;
 	int curr;
 
 	/* Stop RX Queues */
-	mv643xx_eth_port_disable_rx(port_num);
+	mv643xx_eth_port_disable_rx(mp);
 
 	/* Free preallocated skb's on RX rings */
 	for (curr = 0; mp->rx_desc_count && curr < mp->rx_ring_size; curr++) {
@@ -1520,9 +1502,9 @@ static int mv643xx_eth_stop(struct net_device *dev)
 	unsigned int port_num = mp->port_num;
 
 	/* Mask all interrupts on ethernet port */
-	mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
+	wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
 	/* wait for previous write to complete */
-	mv_read(INTERRUPT_MASK_REG(port_num));
+	rdl(mp, INTERRUPT_MASK_REG(port_num));
 
 #ifdef MV643XX_NAPI
 	napi_disable(&mp->napi);
@@ -1530,7 +1512,7 @@ static int mv643xx_eth_stop(struct net_device *dev)
 	netif_carrier_off(dev);
 	netif_stop_queue(dev);
 
-	eth_port_reset(mp->port_num);
+	eth_port_reset(mp);
 
 	mv643xx_eth_free_tx_rings(dev);
 	mv643xx_eth_free_rx_rings(dev);
@@ -1561,15 +1543,15 @@ static int mv643xx_poll(struct napi_struct *napi, int budget)
 #endif
 
 	work_done = 0;
-	if ((mv_read(RX_CURRENT_QUEUE_DESC_PTR_0(port_num)))
+	if ((rdl(mp, RX_CURRENT_QUEUE_DESC_PTR_0(port_num)))
 	    != (u32) mp->rx_used_desc_q)
 		work_done = mv643xx_eth_receive_queue(dev, budget);
 
 	if (work_done < budget) {
 		netif_rx_complete(dev, napi);
-		mv_write(INTERRUPT_CAUSE_REG(port_num), 0);
-		mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
-		mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
+		wrl(mp, INTERRUPT_CAUSE_REG(port_num), 0);
+		wrl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
+		wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
 	}
 
 	return work_done;
@@ -1723,7 +1705,7 @@ static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
 
 	/* ensure all descriptors are written before poking hardware */
 	wmb();
-	mv643xx_eth_port_enable_tx(mp->port_num, ETH_TX_QUEUES_ENABLED);
+	mv643xx_eth_port_enable_tx(mp, ETH_TX_QUEUES_ENABLED);
 
 	mp->tx_desc_count += nr_frags + 1;
 }
@@ -1739,25 +1721,23 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned long flags;
 
 	BUG_ON(netif_queue_stopped(dev));
-	BUG_ON(skb == NULL);
+
+	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
+		stats->tx_dropped++;
+		printk(KERN_DEBUG "%s: failed to linearize tiny "
+				"unaligned fragment\n", dev->name);
+		return NETDEV_TX_BUSY;
+	}
+
+	spin_lock_irqsave(&mp->lock, flags);
 
 	if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB) {
 		printk(KERN_ERR "%s: transmit with queue full\n", dev->name);
 		netif_stop_queue(dev);
-		return 1;
-	}
-
-	if (has_tiny_unaligned_frags(skb)) {
-		if (__skb_linearize(skb)) {
-			stats->tx_dropped++;
-			printk(KERN_DEBUG "%s: failed to linearize tiny "
-					"unaligned fragment\n", dev->name);
-			return 1;
-		}
+		spin_unlock_irqrestore(&mp->lock, flags);
+		return NETDEV_TX_BUSY;
 	}
 
-	spin_lock_irqsave(&mp->lock, flags);
-
 	eth_tx_submit_descs_for_skb(mp, skb);
 	stats->tx_bytes += skb->len;
 	stats->tx_packets++;
@@ -1768,7 +1748,7 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	spin_unlock_irqrestore(&mp->lock, flags);
 
-	return 0;		/* success */
+	return NETDEV_TX_OK;
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1777,13 +1757,13 @@ static void mv643xx_netpoll(struct net_device *netdev)
 	struct mv643xx_private *mp = netdev_priv(netdev);
 	int port_num = mp->port_num;
 
-	mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
+	wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
 	/* wait for previous write to complete */
-	mv_read(INTERRUPT_MASK_REG(port_num));
+	rdl(mp, INTERRUPT_MASK_REG(port_num));
 
 	mv643xx_eth_int_handler(netdev->irq, netdev);
 
-	mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
+	wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
 }
 #endif
 
@@ -1900,7 +1880,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 	port_num = mp->port_num = pd->port_number;
 
 	/* set default config values */
-	eth_port_uc_addr_get(port_num, dev->dev_addr);
+	eth_port_uc_addr_get(mp, dev->dev_addr);
 	mp->rx_ring_size = PORT_DEFAULT_RECEIVE_QUEUE_SIZE;
 	mp->tx_ring_size = PORT_DEFAULT_TRANSMIT_QUEUE_SIZE;
 
@@ -1908,7 +1888,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 		memcpy(dev->dev_addr, pd->mac_addr, 6);
 
 	if (pd->phy_addr || pd->force_phy_addr)
-		ethernet_phy_set(port_num, pd->phy_addr);
+		ethernet_phy_set(mp, pd->phy_addr);
 
 	if (pd->rx_queue_size)
 		mp->rx_ring_size = pd->rx_queue_size;
@@ -1933,19 +1913,18 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 	mp->mii.dev = dev;
 	mp->mii.mdio_read = mv643xx_mdio_read;
 	mp->mii.mdio_write = mv643xx_mdio_write;
-	mp->mii.phy_id = ethernet_phy_get(port_num);
+	mp->mii.phy_id = ethernet_phy_get(mp);
 	mp->mii.phy_id_mask = 0x3f;
 	mp->mii.reg_num_mask = 0x1f;
 
-	err = ethernet_phy_detect(port_num);
+	err = ethernet_phy_detect(mp);
 	if (err) {
-		pr_debug("MV643xx ethernet port %d: "
-					"No PHY detected at addr %d\n",
-					port_num, ethernet_phy_get(port_num));
+		pr_debug("%s: No PHY detected at addr %d\n",
+				dev->name, ethernet_phy_get(mp));
 		goto out;
 	}
 
-	ethernet_phy_reset(port_num);
+	ethernet_phy_reset(mp);
 	mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii);
 	mv643xx_init_ethtool_cmd(dev, mp->mii.phy_id, speed, duplex, &cmd);
 	mv643xx_eth_update_pscr(dev, &cmd);
@@ -2006,9 +1985,11 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
 
 static int mv643xx_eth_shared_probe(struct platform_device *pdev)
 {
+	static int mv643xx_version_printed = 0;
 	struct resource *res;
 
-	printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n");
+	if (!mv643xx_version_printed++)
+		printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n");
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (res == NULL)
@@ -2037,10 +2018,10 @@ static void mv643xx_eth_shutdown(struct platform_device *pdev)
 	unsigned int port_num = mp->port_num;
 
 	/* Mask all interrupts on ethernet port */
-	mv_write(INTERRUPT_MASK_REG(port_num), 0);
-	mv_read (INTERRUPT_MASK_REG(port_num));
+	wrl(mp, INTERRUPT_MASK_REG(port_num), 0);
+	rdl(mp, INTERRUPT_MASK_REG(port_num));
 
-	eth_port_reset(port_num);
+	eth_port_reset(mp);
 }
 
 static struct platform_driver mv643xx_eth_driver = {
@@ -2229,12 +2210,9 @@ MODULE_ALIAS("platform:mv643xx_eth");
  *		return_info	Tx/Rx user resource return information.
  */
 
-/* PHY routines */
-static int ethernet_phy_get(unsigned int eth_port_num);
-static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr);
-
 /* Ethernet Port routines */
-static void eth_port_set_filter_table_entry(int table, unsigned char entry);
+static void eth_port_set_filter_table_entry(struct mv643xx_private *mp,
+					    int table, unsigned char entry);
 
 /*
  * eth_port_init - Initialize the Ethernet port driver
@@ -2264,9 +2242,9 @@ static void eth_port_init(struct mv643xx_private *mp)
 {
 	mp->rx_resource_err = 0;
 
-	eth_port_reset(mp->port_num);
+	eth_port_reset(mp);
 
-	eth_port_init_mac_tables(mp->port_num);
+	eth_port_init_mac_tables(mp);
 }
 
 /*
@@ -2306,28 +2284,28 @@ static void eth_port_start(struct net_device *dev)
 
 	/* Assignment of Tx CTRP of given queue */
 	tx_curr_desc = mp->tx_curr_desc_q;
-	mv_write(TX_CURRENT_QUEUE_DESC_PTR_0(port_num),
+	wrl(mp, TX_CURRENT_QUEUE_DESC_PTR_0(port_num),
 		(u32)((struct eth_tx_desc *)mp->tx_desc_dma + tx_curr_desc));
 
 	/* Assignment of Rx CRDP of given queue */
 	rx_curr_desc = mp->rx_curr_desc_q;
-	mv_write(RX_CURRENT_QUEUE_DESC_PTR_0(port_num),
+	wrl(mp, RX_CURRENT_QUEUE_DESC_PTR_0(port_num),
 		(u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc));
 
 	/* Add the assigned Ethernet address to the port's address table */
-	eth_port_uc_addr_set(port_num, dev->dev_addr);
+	eth_port_uc_addr_set(mp, dev->dev_addr);
 
 	/* Assign port configuration and command. */
-	mv_write(PORT_CONFIG_REG(port_num),
+	wrl(mp, PORT_CONFIG_REG(port_num),
 			  PORT_CONFIG_DEFAULT_VALUE);
 
-	mv_write(PORT_CONFIG_EXTEND_REG(port_num),
+	wrl(mp, PORT_CONFIG_EXTEND_REG(port_num),
 			  PORT_CONFIG_EXTEND_DEFAULT_VALUE);
 
-	pscr = mv_read(PORT_SERIAL_CONTROL_REG(port_num));
+	pscr = rdl(mp, PORT_SERIAL_CONTROL_REG(port_num));
 
 	pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
-	mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr);
+	wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), pscr);
 
 	pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
 		DISABLE_AUTO_NEG_SPEED_GMII    |
@@ -2335,32 +2313,34 @@ static void eth_port_start(struct net_device *dev)
 		DO_NOT_FORCE_LINK_FAIL	   |
 		SERIAL_PORT_CONTROL_RESERVED;
 
-	mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr);
+	wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), pscr);
 
 	pscr |= SERIAL_PORT_ENABLE;
-	mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr);
+	wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), pscr);
 
 	/* Assign port SDMA configuration */
-	mv_write(SDMA_CONFIG_REG(port_num),
+	wrl(mp, SDMA_CONFIG_REG(port_num),
 			  PORT_SDMA_CONFIG_DEFAULT_VALUE);
 
 	/* Enable port Rx. */
-	mv643xx_eth_port_enable_rx(port_num, ETH_RX_QUEUES_ENABLED);
+	mv643xx_eth_port_enable_rx(mp, ETH_RX_QUEUES_ENABLED);
 
 	/* Disable port bandwidth limits by clearing MTU register */
-	mv_write(MAXIMUM_TRANSMIT_UNIT(port_num), 0);
+	wrl(mp, MAXIMUM_TRANSMIT_UNIT(port_num), 0);
 
 	/* save phy settings across reset */
 	mv643xx_get_settings(dev, &ethtool_cmd);
-	ethernet_phy_reset(mp->port_num);
+	ethernet_phy_reset(mp);
 	mv643xx_set_settings(dev, &ethtool_cmd);
 }
 
 /*
  * eth_port_uc_addr_set - Write a MAC address into the port's hw registers
  */
-static void eth_port_uc_addr_set(unsigned int port_num, unsigned char *p_addr)
+static void eth_port_uc_addr_set(struct mv643xx_private *mp,
+				 unsigned char *p_addr)
 {
+	unsigned int port_num = mp->port_num;
 	unsigned int mac_h;
 	unsigned int mac_l;
 	int table;
@@ -2369,24 +2349,26 @@ static void eth_port_uc_addr_set(unsigned int port_num, unsigned char *p_addr)
 	mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
 							(p_addr[3] << 0);
 
-	mv_write(MAC_ADDR_LOW(port_num), mac_l);
-	mv_write(MAC_ADDR_HIGH(port_num), mac_h);
+	wrl(mp, MAC_ADDR_LOW(port_num), mac_l);
+	wrl(mp, MAC_ADDR_HIGH(port_num), mac_h);
 
 	/* Accept frames with this address */
 	table = DA_FILTER_UNICAST_TABLE_BASE(port_num);
-	eth_port_set_filter_table_entry(table, p_addr[5] & 0x0f);
+	eth_port_set_filter_table_entry(mp, table, p_addr[5] & 0x0f);
 }
 
 /*
  * eth_port_uc_addr_get - Read the MAC address from the port's hw registers
  */
-static void eth_port_uc_addr_get(unsigned int port_num, unsigned char *p_addr)
+static void eth_port_uc_addr_get(struct mv643xx_private *mp,
+				 unsigned char *p_addr)
 {
+	unsigned int port_num = mp->port_num;
 	unsigned int mac_h;
 	unsigned int mac_l;
 
-	mac_h = mv_read(MAC_ADDR_HIGH(port_num));
-	mac_l = mv_read(MAC_ADDR_LOW(port_num));
+	mac_h = rdl(mp, MAC_ADDR_HIGH(port_num));
+	mac_l = rdl(mp, MAC_ADDR_LOW(port_num));
 
 	p_addr[0] = (mac_h >> 24) & 0xff;
 	p_addr[1] = (mac_h >> 16) & 0xff;
@@ -2405,7 +2387,8 @@ static void eth_port_uc_addr_get(unsigned int port_num, unsigned char *p_addr)
  *	3-1	Queue			(ETH_Q0=0)
  *	7-4	Reserved = 0;
  */
-static void eth_port_set_filter_table_entry(int table, unsigned char entry)
+static void eth_port_set_filter_table_entry(struct mv643xx_private *mp,
+					    int table, unsigned char entry)
 {
 	unsigned int table_reg;
 	unsigned int tbl_offset;
@@ -2415,9 +2398,9 @@ static void eth_port_set_filter_table_entry(int table, unsigned char entry)
 	reg_offset = entry % 4;		/* Entry offset within the register */
 
 	/* Set "accepts frame bit" at specified table entry */
-	table_reg = mv_read(table + tbl_offset);
+	table_reg = rdl(mp, table + tbl_offset);
 	table_reg |= 0x01 << (8 * reg_offset);
-	mv_write(table + tbl_offset, table_reg);
+	wrl(mp, table + tbl_offset, table_reg);
 }
 
 /*
@@ -2434,8 +2417,9 @@ static void eth_port_set_filter_table_entry(int table, unsigned char entry)
  * In either case, eth_port_set_filter_table_entry() is then called
  * to set to set the actual table entry.
  */
-static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr)
+static void eth_port_mc_addr(struct mv643xx_private *mp, unsigned char *p_addr)
 {
+	unsigned int port_num = mp->port_num;
 	unsigned int mac_h;
 	unsigned int mac_l;
 	unsigned char crc_result = 0;
@@ -2446,9 +2430,8 @@ static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr)
 
 	if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) &&
 	    (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) {
-		table = DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
-					(eth_port_num);
-		eth_port_set_filter_table_entry(table, p_addr[5]);
+		table = DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(port_num);
+		eth_port_set_filter_table_entry(mp, table, p_addr[5]);
 		return;
 	}
 
@@ -2520,8 +2503,8 @@ static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr)
 	for (i = 0; i < 8; i++)
 		crc_result = crc_result | (crc[i] << i);
 
-	table = DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num);
-	eth_port_set_filter_table_entry(table, crc_result);
+	table = DA_FILTER_OTHER_MULTICAST_TABLE_BASE(port_num);
+	eth_port_set_filter_table_entry(mp, table, crc_result);
 }
 
 /*
@@ -2550,7 +2533,7 @@ static void eth_port_set_multicast_list(struct net_device *dev)
 			 * 3-1  Queue	 ETH_Q0=0
 			 * 7-4  Reserved = 0;
 			 */
-			mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
+			wrl(mp, DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
 
 			/* Set all entries in DA filter other multicast
 			 * table (Ex_dFOMT)
@@ -2560,7 +2543,7 @@ static void eth_port_set_multicast_list(struct net_device *dev)
 			 * 3-1  Queue	 ETH_Q0=0
 			 * 7-4  Reserved = 0;
 			 */
-			mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
+			wrl(mp, DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
 		}
 		return;
 	}
@@ -2570,11 +2553,11 @@ static void eth_port_set_multicast_list(struct net_device *dev)
 	 */
 	for (table_index = 0; table_index <= 0xFC; table_index += 4) {
 		/* Clear DA filter special multicast table (Ex_dFSMT) */
-		mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
+		wrl(mp, DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
 				(eth_port_num) + table_index, 0);
 
 		/* Clear DA filter other multicast table (Ex_dFOMT) */
-		mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE
+		wrl(mp, DA_FILTER_OTHER_MULTICAST_TABLE_BASE
 				(eth_port_num) + table_index, 0);
 	}
 
@@ -2583,7 +2566,7 @@ static void eth_port_set_multicast_list(struct net_device *dev)
 			(i < 256) && (mc_list != NULL) && (i < dev->mc_count);
 			i++, mc_list = mc_list->next)
 		if (mc_list->dmi_addrlen == 6)
-			eth_port_mc_addr(eth_port_num, mc_list->dmi_addr);
+			eth_port_mc_addr(mp, mc_list->dmi_addr);
 }
 
 /*
@@ -2594,7 +2577,7 @@ static void eth_port_set_multicast_list(struct net_device *dev)
  *	Other Multicast) and set each entry to 0.
  *
  * INPUT:
- *	unsigned int	eth_port_num	Ethernet Port number.
+ *	struct mv643xx_private *mp	Ethernet Port.
  *
  * OUTPUT:
  *	Multicast and Unicast packets are rejected.
@@ -2602,22 +2585,23 @@ static void eth_port_set_multicast_list(struct net_device *dev)
  * RETURN:
  *	None.
  */
-static void eth_port_init_mac_tables(unsigned int eth_port_num)
+static void eth_port_init_mac_tables(struct mv643xx_private *mp)
 {
+	unsigned int port_num = mp->port_num;
 	int table_index;
 
 	/* Clear DA filter unicast table (Ex_dFUT) */
 	for (table_index = 0; table_index <= 0xC; table_index += 4)
-		mv_write(DA_FILTER_UNICAST_TABLE_BASE
-					(eth_port_num) + table_index, 0);
+		wrl(mp, DA_FILTER_UNICAST_TABLE_BASE(port_num) +
+					table_index, 0);
 
 	for (table_index = 0; table_index <= 0xFC; table_index += 4) {
 		/* Clear DA filter special multicast table (Ex_dFSMT) */
-		mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
-					(eth_port_num) + table_index, 0);
+		wrl(mp, DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(port_num) +
+					table_index, 0);
 		/* Clear DA filter other multicast table (Ex_dFOMT) */
-		mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE
-					(eth_port_num) + table_index, 0);
+		wrl(mp, DA_FILTER_OTHER_MULTICAST_TABLE_BASE(port_num) +
+					table_index, 0);
 	}
 }
 
@@ -2629,7 +2613,7 @@ static void eth_port_init_mac_tables(unsigned int eth_port_num)
  *	A read from the MIB counter will reset the counter.
  *
  * INPUT:
- *	unsigned int	eth_port_num	Ethernet Port number.
+ *	struct mv643xx_private *mp	Ethernet Port.
  *
  * OUTPUT:
  *	After reading all MIB counters, the counters resets.
@@ -2638,19 +2622,20 @@ static void eth_port_init_mac_tables(unsigned int eth_port_num)
  *	MIB counter value.
  *
  */
-static void eth_clear_mib_counters(unsigned int eth_port_num)
+static void eth_clear_mib_counters(struct mv643xx_private *mp)
 {
+	unsigned int port_num = mp->port_num;
 	int i;
 
 	/* Perform dummy reads from MIB counters */
 	for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION;
 									i += 4)
-		mv_read(MIB_COUNTERS_BASE(eth_port_num) + i);
+		rdl(mp, MIB_COUNTERS_BASE(port_num) + i);
 }
 
 static inline u32 read_mib(struct mv643xx_private *mp, int offset)
 {
-	return mv_read(MIB_COUNTERS_BASE(mp->port_num) + offset);
+	return rdl(mp, MIB_COUNTERS_BASE(mp->port_num) + offset);
 }
 
 static void eth_update_mib_counters(struct mv643xx_private *mp)
@@ -2686,7 +2671,7 @@ static void eth_update_mib_counters(struct mv643xx_private *mp)
  *	the specified port.
  *
  * INPUT:
- *	unsigned int	eth_port_num	Ethernet Port number.
+ *	struct mv643xx_private *mp	Ethernet Port.
  *
  * OUTPUT:
  *	None
@@ -2696,22 +2681,22 @@ static void eth_update_mib_counters(struct mv643xx_private *mp)
  *	-ENODEV on failure
  *
  */
-static int ethernet_phy_detect(unsigned int port_num)
+static int ethernet_phy_detect(struct mv643xx_private *mp)
 {
 	unsigned int phy_reg_data0;
 	int auto_neg;
 
-	eth_port_read_smi_reg(port_num, 0, &phy_reg_data0);
+	eth_port_read_smi_reg(mp, 0, &phy_reg_data0);
 	auto_neg = phy_reg_data0 & 0x1000;
 	phy_reg_data0 ^= 0x1000;	/* invert auto_neg */
-	eth_port_write_smi_reg(port_num, 0, phy_reg_data0);
+	eth_port_write_smi_reg(mp, 0, phy_reg_data0);
 
-	eth_port_read_smi_reg(port_num, 0, &phy_reg_data0);
+	eth_port_read_smi_reg(mp, 0, &phy_reg_data0);
 	if ((phy_reg_data0 & 0x1000) == auto_neg)
 		return -ENODEV;				/* change didn't take */
 
 	phy_reg_data0 ^= 0x1000;
-	eth_port_write_smi_reg(port_num, 0, phy_reg_data0);
+	eth_port_write_smi_reg(mp, 0, phy_reg_data0);
 	return 0;
 }
 
@@ -2722,7 +2707,7 @@ static int ethernet_phy_detect(unsigned int port_num)
  *	This routine returns the given ethernet port PHY address.
  *
  * INPUT:
- *	unsigned int	eth_port_num	Ethernet Port number.
+ *	struct mv643xx_private *mp	Ethernet Port.
  *
  * OUTPUT:
  *	None.
@@ -2731,13 +2716,13 @@ static int ethernet_phy_detect(unsigned int port_num)
  *	PHY address.
  *
  */
-static int ethernet_phy_get(unsigned int eth_port_num)
+static int ethernet_phy_get(struct mv643xx_private *mp)
 {
 	unsigned int reg_data;
 
-	reg_data = mv_read(PHY_ADDR_REG);
+	reg_data = rdl(mp, PHY_ADDR_REG);
 
-	return ((reg_data >> (5 * eth_port_num)) & 0x1f);
+	return ((reg_data >> (5 * mp->port_num)) & 0x1f);
 }
 
 /*
@@ -2747,7 +2732,7 @@ static int ethernet_phy_get(unsigned int eth_port_num)
  *	This routine sets the given ethernet port PHY address.
  *
  * INPUT:
- *	unsigned int	eth_port_num	Ethernet Port number.
+ *	struct mv643xx_private *mp	Ethernet Port.
  *	int		phy_addr	PHY address.
  *
  * OUTPUT:
@@ -2757,15 +2742,15 @@ static int ethernet_phy_get(unsigned int eth_port_num)
  *	None.
  *
  */
-static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr)
+static void ethernet_phy_set(struct mv643xx_private *mp, int phy_addr)
 {
 	u32 reg_data;
-	int addr_shift = 5 * eth_port_num;
+	int addr_shift = 5 * mp->port_num;
 
-	reg_data = mv_read(PHY_ADDR_REG);
+	reg_data = rdl(mp, PHY_ADDR_REG);
 	reg_data &= ~(0x1f << addr_shift);
 	reg_data |= (phy_addr & 0x1f) << addr_shift;
-	mv_write(PHY_ADDR_REG, reg_data);
+	wrl(mp, PHY_ADDR_REG, reg_data);
 }
 
 /*
@@ -2775,7 +2760,7 @@ static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr)
  *	This routine utilizes the SMI interface to reset the ethernet port PHY.
  *
  * INPUT:
- *	unsigned int	eth_port_num	Ethernet Port number.
+ *	struct mv643xx_private *mp	Ethernet Port.
  *
  * OUTPUT:
  *	The PHY is reset.
@@ -2784,51 +2769,52 @@ static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr)
  *	None.
  *
  */
-static void ethernet_phy_reset(unsigned int eth_port_num)
+static void ethernet_phy_reset(struct mv643xx_private *mp)
 {
 	unsigned int phy_reg_data;
 
 	/* Reset the PHY */
-	eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data);
+	eth_port_read_smi_reg(mp, 0, &phy_reg_data);
 	phy_reg_data |= 0x8000;	/* Set bit 15 to reset the PHY */
-	eth_port_write_smi_reg(eth_port_num, 0, phy_reg_data);
+	eth_port_write_smi_reg(mp, 0, phy_reg_data);
 
 	/* wait for PHY to come out of reset */
 	do {
 		udelay(1);
-		eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data);
+		eth_port_read_smi_reg(mp, 0, &phy_reg_data);
 	} while (phy_reg_data & 0x8000);
 }
 
-static void mv643xx_eth_port_enable_tx(unsigned int port_num,
+static void mv643xx_eth_port_enable_tx(struct mv643xx_private *mp,
 					unsigned int queues)
 {
-	mv_write(TRANSMIT_QUEUE_COMMAND_REG(port_num), queues);
+	wrl(mp, TRANSMIT_QUEUE_COMMAND_REG(mp->port_num), queues);
 }
 
-static void mv643xx_eth_port_enable_rx(unsigned int port_num,
+static void mv643xx_eth_port_enable_rx(struct mv643xx_private *mp,
 					unsigned int queues)
 {
-	mv_write(RECEIVE_QUEUE_COMMAND_REG(port_num), queues);
+	wrl(mp, RECEIVE_QUEUE_COMMAND_REG(mp->port_num), queues);
 }
 
-static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num)
+static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_private *mp)
 {
+	unsigned int port_num = mp->port_num;
 	u32 queues;
 
 	/* Stop Tx port activity. Check port Tx activity. */
-	queues = mv_read(TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF;
+	queues = rdl(mp, TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF;
 	if (queues) {
 		/* Issue stop command for active queues only */
-		mv_write(TRANSMIT_QUEUE_COMMAND_REG(port_num), (queues << 8));
+		wrl(mp, TRANSMIT_QUEUE_COMMAND_REG(port_num), (queues << 8));
 
 		/* Wait for all Tx activity to terminate. */
 		/* Check port cause register that all Tx queues are stopped */
-		while (mv_read(TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF)
+		while (rdl(mp, TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF)
 			udelay(PHY_WAIT_MICRO_SECONDS);
 
 		/* Wait for Tx FIFO to empty */
-		while (mv_read(PORT_STATUS_REG(port_num)) &
+		while (rdl(mp, PORT_STATUS_REG(port_num)) &
 							ETH_PORT_TX_FIFO_EMPTY)
 			udelay(PHY_WAIT_MICRO_SECONDS);
 	}
@@ -2836,19 +2822,20 @@ static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num)
 	return queues;
 }
 
-static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num)
+static unsigned int mv643xx_eth_port_disable_rx(struct mv643xx_private *mp)
 {
+	unsigned int port_num = mp->port_num;
 	u32 queues;
 
 	/* Stop Rx port activity. Check port Rx activity. */
-	queues = mv_read(RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF;
+	queues = rdl(mp, RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF;
 	if (queues) {
 		/* Issue stop command for active queues only */
-		mv_write(RECEIVE_QUEUE_COMMAND_REG(port_num), (queues << 8));
+		wrl(mp, RECEIVE_QUEUE_COMMAND_REG(port_num), (queues << 8));
 
 		/* Wait for all Rx activity to terminate. */
 		/* Check port cause register that all Rx queues are stopped */
-		while (mv_read(RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF)
+		while (rdl(mp, RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF)
 			udelay(PHY_WAIT_MICRO_SECONDS);
 	}
 
@@ -2864,7 +2851,7 @@ static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num)
  *	idle state after this command is performed and the port is disabled.
  *
  * INPUT:
- *	unsigned int	eth_port_num	Ethernet Port number.
+ *	struct mv643xx_private *mp	Ethernet Port.
  *
  * OUTPUT:
  *	Channel activity is halted.
@@ -2873,22 +2860,23 @@ static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num)
  *	None.
  *
  */
-static void eth_port_reset(unsigned int port_num)
+static void eth_port_reset(struct mv643xx_private *mp)
 {
+	unsigned int port_num = mp->port_num;
 	unsigned int reg_data;
 
-	mv643xx_eth_port_disable_tx(port_num);
-	mv643xx_eth_port_disable_rx(port_num);
+	mv643xx_eth_port_disable_tx(mp);
+	mv643xx_eth_port_disable_rx(mp);
 
 	/* Clear all MIB counters */
-	eth_clear_mib_counters(port_num);
+	eth_clear_mib_counters(mp);
 
 	/* Reset the Enable bit in the Configuration Register */
-	reg_data = mv_read(PORT_SERIAL_CONTROL_REG(port_num));
+	reg_data = rdl(mp, PORT_SERIAL_CONTROL_REG(port_num));
 	reg_data &= ~(SERIAL_PORT_ENABLE		|
 			DO_NOT_FORCE_LINK_FAIL	|
 			FORCE_LINK_PASS);
-	mv_write(PORT_SERIAL_CONTROL_REG(port_num), reg_data);
+	wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), reg_data);
 }
 
 
@@ -2900,7 +2888,7 @@ static void eth_port_reset(unsigned int port_num)
  *	order to perform PHY register read.
  *
  * INPUT:
- *	unsigned int	port_num	Ethernet Port number.
+ *	struct mv643xx_private *mp	Ethernet Port.
  *	unsigned int	phy_reg		PHY register address offset.
  *	unsigned int	*value		Register value buffer.
  *
@@ -2912,10 +2900,10 @@ static void eth_port_reset(unsigned int port_num)
  *	true otherwise.
  *
  */
-static void eth_port_read_smi_reg(unsigned int port_num,
+static void eth_port_read_smi_reg(struct mv643xx_private *mp,
 				unsigned int phy_reg, unsigned int *value)
 {
-	int phy_addr = ethernet_phy_get(port_num);
+	int phy_addr = ethernet_phy_get(mp);
 	unsigned long flags;
 	int i;
 
@@ -2923,27 +2911,27 @@ static void eth_port_read_smi_reg(unsigned int port_num,
 	spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);
 
 	/* wait for the SMI register to become available */
-	for (i = 0; mv_read(SMI_REG) & ETH_SMI_BUSY; i++) {
+	for (i = 0; rdl(mp, SMI_REG) & ETH_SMI_BUSY; i++) {
 		if (i == PHY_WAIT_ITERATIONS) {
-			printk("mv643xx PHY busy timeout, port %d\n", port_num);
+			printk("%s: PHY busy timeout\n", mp->dev->name);
 			goto out;
 		}
 		udelay(PHY_WAIT_MICRO_SECONDS);
 	}
 
-	mv_write(SMI_REG,
+	wrl(mp, SMI_REG,
 		(phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ);
 
 	/* now wait for the data to be valid */
-	for (i = 0; !(mv_read(SMI_REG) & ETH_SMI_READ_VALID); i++) {
+	for (i = 0; !(rdl(mp, SMI_REG) & ETH_SMI_READ_VALID); i++) {
 		if (i == PHY_WAIT_ITERATIONS) {
-			printk("mv643xx PHY read timeout, port %d\n", port_num);
+			printk("%s: PHY read timeout\n", mp->dev->name);
 			goto out;
 		}
 		udelay(PHY_WAIT_MICRO_SECONDS);
 	}
 
-	*value = mv_read(SMI_REG) & 0xffff;
+	*value = rdl(mp, SMI_REG) & 0xffff;
 out:
 	spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);
 }
@@ -2956,7 +2944,7 @@ out:
  *	order to perform writes to PHY registers.
  *
  * INPUT:
- *	unsigned int	eth_port_num	Ethernet Port number.
+ *	struct mv643xx_private *mp	Ethernet Port.
  *	unsigned int	phy_reg		PHY register address offset.
  *	unsigned int	value		Register value.
  *
@@ -2968,29 +2956,28 @@ out:
  *	true otherwise.
  *
  */
-static void eth_port_write_smi_reg(unsigned int eth_port_num,
+static void eth_port_write_smi_reg(struct mv643xx_private *mp,
 				   unsigned int phy_reg, unsigned int value)
 {
 	int phy_addr;
 	int i;
 	unsigned long flags;
 
-	phy_addr = ethernet_phy_get(eth_port_num);
+	phy_addr = ethernet_phy_get(mp);
 
 	/* the SMI register is a shared resource */
 	spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);
 
 	/* wait for the SMI register to become available */
-	for (i = 0; mv_read(SMI_REG) & ETH_SMI_BUSY; i++) {
+	for (i = 0; rdl(mp, SMI_REG) & ETH_SMI_BUSY; i++) {
 		if (i == PHY_WAIT_ITERATIONS) {
-			printk("mv643xx PHY busy timeout, port %d\n",
-								eth_port_num);
+			printk("%s: PHY busy timeout\n", mp->dev->name);
 			goto out;
 		}
 		udelay(PHY_WAIT_MICRO_SECONDS);
 	}
 
-	mv_write(SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
+	wrl(mp, SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
 				ETH_SMI_OPCODE_WRITE | (value & 0xffff));
 out:
 	spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);
@@ -3001,17 +2988,17 @@ out:
  */
 static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location)
 {
-	int val;
 	struct mv643xx_private *mp = netdev_priv(dev);
+	int val;
 
-	eth_port_read_smi_reg(mp->port_num, location, &val);
+	eth_port_read_smi_reg(mp, location, &val);
 	return val;
 }
 
 static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val)
 {
 	struct mv643xx_private *mp = netdev_priv(dev);
-	eth_port_write_smi_reg(mp->port_num, location, val);
+	eth_port_write_smi_reg(mp, location, val);
 }
 
 /*
@@ -3156,7 +3143,7 @@ struct mv643xx_stats {
 	int stat_offset;
 };
 
-#define MV643XX_STAT(m) sizeof(((struct mv643xx_private *)0)->m), \
+#define MV643XX_STAT(m) FIELD_SIZEOF(struct mv643xx_private, m), \
 					offsetof(struct mv643xx_private, m)
 
 static const struct mv643xx_stats mv643xx_gstrings_stats[] = {

+ 11 - 9
drivers/net/natsemi.c

@@ -511,10 +511,10 @@ enum PhyCtrl_bits {
 /* Note that using only 32 bit fields simplifies conversion to big-endian
    architectures. */
 struct netdev_desc {
-	u32 next_desc;
-	s32 cmd_status;
-	u32 addr;
-	u32 software_use;
+	__le32 next_desc;
+	__le32 cmd_status;
+	__le32 addr;
+	__le32 software_use;
 };
 
 /* Bits in network_desc.status */
@@ -786,7 +786,8 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
 	struct netdev_private *np;
 	int i, option, irq, chip_idx = ent->driver_data;
 	static int find_cnt = -1;
-	unsigned long iostart, iosize;
+	resource_size_t iostart;
+	unsigned long iosize;
 	void __iomem *ioaddr;
 	const int pcibar = 1; /* PCI base address register */
 	int prev_eedata;
@@ -946,10 +947,11 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
 		goto err_create_file;
 
 	if (netif_msg_drv(np)) {
-		printk(KERN_INFO "natsemi %s: %s at %#08lx "
+		printk(KERN_INFO "natsemi %s: %s at %#08llx "
 		       "(%s), %s, IRQ %d",
-		       dev->name, natsemi_pci_info[chip_idx].name, iostart,
-		       pci_name(np->pci_dev), print_mac(mac, dev->dev_addr), irq);
+		       dev->name, natsemi_pci_info[chip_idx].name,
+		       (unsigned long long)iostart, pci_name(np->pci_dev),
+		       print_mac(mac, dev->dev_addr), irq);
 		if (dev->if_port == PORT_TP)
 			printk(", port TP.\n");
 		else if (np->ignore_phy)
@@ -2018,7 +2020,7 @@ static void drain_rx(struct net_device *dev)
 	/* Free all the skbuffs in the Rx queue. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
 		np->rx_ring[i].cmd_status = 0;
-		np->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
+		np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
 		if (np->rx_skbuff[i]) {
 			pci_unmap_single(np->pci_dev,
 				np->rx_dma[i], buflen,

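The natsemi hunks above annotate the DMA descriptor fields as __le32 and wrap the values written into them in cpu_to_le32(), since the NIC consumes the ring in little-endian byte order regardless of the host CPU. Below is a hedged sketch of that annotation pattern with an invented descriptor layout; it is not the driver's real structure, only an illustration of the convention.

	/*
	 * Sketch of the endianness-annotation pattern applied above; the
	 * descriptor layout here is illustrative, not natsemi's real one.
	 */
	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct rx_desc_sketch {
		__le32 next_desc;	/* the device reads these fields little-endian */
		__le32 cmd_status;
		__le32 addr;
	};

	static void fill_desc(struct rx_desc_sketch *d, u32 dma_addr, u32 status)
	{
		d->addr = cpu_to_le32(dma_addr);	/* convert before the NIC sees it */
		d->cmd_status = cpu_to_le32(status);
	}

	static u32 read_status(const struct rx_desc_sketch *d)
	{
		return le32_to_cpu(d->cmd_status);	/* convert after the NIC wrote it */
	}

With the fields typed as __le32, sparse can flag any bare assignment such as the old "addr = 0xBADF00D0", which is why that line gains a cpu_to_le32() in the hunk above.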
+ 0 - 18
drivers/net/netxen/netxen_nic.h

@@ -95,23 +95,6 @@
 
 #define ADDR_IN_WINDOW1(off)	\
 	((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 1 : 0
-/*
- * In netxen_nic_down(), we must wait for any pending callback requests into
- * netxen_watchdog_task() to complete; eg otherwise the watchdog_timer could be
- * reenabled right after it is deleted in netxen_nic_down(). FLUSH_SCHEDULED_WORK()
- * does this synchronization.
- *
- * Normally, schedule_work()/flush_scheduled_work() could have worked, but
- * netxen_nic_close() is invoked with kernel rtnl lock held. netif_carrier_off()
- * call in netxen_nic_close() triggers a schedule_work(&linkwatch_work), and a
- * subsequent call to flush_scheduled_work() in netxen_nic_down() would cause
- * linkwatch_event() to be executed which also attempts to acquire the rtnl
- * lock thus causing a deadlock.
- */
-
-#define SCHEDULE_WORK(tp)	queue_work(netxen_workq, tp)
-#define FLUSH_SCHEDULED_WORK()	flush_workqueue(netxen_workq)
-extern struct workqueue_struct *netxen_workq;
 
 /*
  * normalize a 64MB crb address to 32MB PCI window
@@ -1050,7 +1033,6 @@ void netxen_halt_pegs(struct netxen_adapter *adapter);
 int netxen_rom_se(struct netxen_adapter *adapter, int addr);
 
 /* Functions from netxen_nic_isr.c */
-int netxen_nic_link_ok(struct netxen_adapter *adapter);
 void netxen_initialize_adapter_sw(struct netxen_adapter *adapter);
 void netxen_initialize_adapter_hw(struct netxen_adapter *adapter);
 void *netxen_alloc(struct pci_dev *pdev, size_t sz, dma_addr_t * ptr,

+ 2 - 0
drivers/net/netxen/netxen_nic_isr.c

@@ -172,6 +172,7 @@ void netxen_nic_gbe_handle_phy_intr(struct netxen_adapter *adapter)
 	netxen_nic_isr_other(adapter);
 }
 
+#if 0
 int netxen_nic_link_ok(struct netxen_adapter *adapter)
 {
 	switch (adapter->ahw.board_type) {
@@ -189,6 +190,7 @@ int netxen_nic_link_ok(struct netxen_adapter *adapter)
 
 	return 0;
 }
+#endif  /*  0  */
 
 void netxen_nic_xgbe_handle_phy_intr(struct netxen_adapter *adapter)
 {

+ 18 - 1
drivers/net/netxen/netxen_nic_main.c

@@ -86,7 +86,24 @@ static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
 
 MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);
 
-struct workqueue_struct *netxen_workq;
+/*
+ * In netxen_nic_down(), we must wait for any pending callback requests into
+ * netxen_watchdog_task() to complete; eg otherwise the watchdog_timer could be
+ * reenabled right after it is deleted in netxen_nic_down().
+ * FLUSH_SCHEDULED_WORK()  does this synchronization.
+ *
+ * Normally, schedule_work()/flush_scheduled_work() could have worked, but
+ * netxen_nic_close() is invoked with kernel rtnl lock held. netif_carrier_off()
+ * call in netxen_nic_close() triggers a schedule_work(&linkwatch_work), and a
+ * subsequent call to flush_scheduled_work() in netxen_nic_down() would cause
+ * linkwatch_event() to be executed which also attempts to acquire the rtnl
+ * lock thus causing a deadlock.
+ */
+
+static struct workqueue_struct *netxen_workq;
+#define SCHEDULE_WORK(tp)	queue_work(netxen_workq, tp)
+#define FLUSH_SCHEDULED_WORK()	flush_workqueue(netxen_workq)
+
 static void netxen_watchdog(unsigned long);
 
 static void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,

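The comment re-added in netxen_nic_main.c above explains why the driver flushes its own workqueue instead of calling flush_scheduled_work(): the shared kernel queue may contain linkwatch_event(), which takes the rtnl lock already held on the close path, so flushing it there would deadlock. A hedged sketch of that dedicated-workqueue pattern follows; the names (example_wq, example_task, example_work_fn) are hypothetical and not netxen's.

	/*
	 * Sketch of the private-workqueue pattern described above; names are
	 * hypothetical, not the netxen driver's.
	 */
	#include <linux/errno.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;
	static struct work_struct example_task;

	static void example_work_fn(struct work_struct *work)
	{
		/* watchdog-style work runs here, outside the rtnl lock */
	}

	static int example_init(void)
	{
		example_wq = create_singlethread_workqueue("example");
		if (!example_wq)
			return -ENOMEM;
		INIT_WORK(&example_task, example_work_fn);
		queue_work(example_wq, &example_task);	/* schedule onto the private queue */
		return 0;
	}

	static void example_down(void)
	{
		/*
		 * Flushing only the private queue waits for example_work_fn()
		 * without touching the shared kernel queue (and thus without
		 * waiting on linkwatch_event(), which needs the rtnl lock).
		 */
		flush_workqueue(example_wq);
	}

	static void example_exit(void)
	{
		destroy_workqueue(example_wq);
	}

Making the workqueue and the SCHEDULE_WORK()/FLUSH_SCHEDULED_WORK() macros static to netxen_nic_main.c, as the hunk above does, keeps this policy local to the one file that needs it.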
Some files were not shown because too many files changed in this diff