@@ -92,7 +92,6 @@ static int msglevel = MSG_LEVEL_INFO;
  *	Fetch the mask bits of the selected CAM and store them into the
  *	provided mask buffer.
  */
-
 static void mac_get_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
 {
 	int i;
@@ -121,7 +120,6 @@ static void mac_get_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
  *
  *	Store a new mask into a CAM
  */
-
 static void mac_set_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
 {
 	int i;
@@ -166,7 +164,6 @@ static void mac_set_vlan_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
  *
  *	Load an address or vlan tag into a CAM
  */
-
 static void mac_set_cam(struct mac_regs __iomem *regs, int idx, const u8 *addr)
 {
 	int i;
@@ -222,7 +219,6 @@ static void mac_set_vlan_cam(struct mac_regs __iomem *regs, int idx,
  *	reset the Wake on lan features. This function doesn't restore
  *	the rest of the logic from the result of sleep/wakeup
  */
-
 static void mac_wol_reset(struct mac_regs __iomem *regs)
 {
 
@@ -241,7 +237,6 @@ static void mac_wol_reset(struct mac_regs __iomem *regs)
 	writew(0xFFFF, &regs->WOLSRClr);
 }
 
-static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
 static const struct ethtool_ops velocity_ethtool_ops;
 
 /*
@@ -369,76 +364,14 @@ static int rx_copybreak = 200;
 module_param(rx_copybreak, int, 0644);
 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
 
-static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr,
-			       const struct velocity_info_tbl *info);
-static int velocity_get_pci_info(struct velocity_info *, struct pci_dev *pdev);
-static void velocity_print_info(struct velocity_info *vptr);
-static int velocity_open(struct net_device *dev);
-static int velocity_change_mtu(struct net_device *dev, int mtu);
-static int velocity_xmit(struct sk_buff *skb, struct net_device *dev);
-static irqreturn_t velocity_intr(int irq, void *dev_instance);
-static void velocity_set_multi(struct net_device *dev);
-static struct net_device_stats *velocity_get_stats(struct net_device *dev);
-static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static int velocity_close(struct net_device *dev);
-static int velocity_receive_frame(struct velocity_info *, int idx);
-static int velocity_alloc_rx_buf(struct velocity_info *, int idx);
-static void velocity_free_rd_ring(struct velocity_info *vptr);
-static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *);
-static int velocity_soft_reset(struct velocity_info *vptr);
-static void mii_init(struct velocity_info *vptr, u32 mii_status);
-static u32 velocity_get_link(struct net_device *dev);
-static u32 velocity_get_opt_media_mode(struct velocity_info *vptr);
-static void velocity_print_link_status(struct velocity_info *vptr);
-static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs);
-static void velocity_shutdown(struct velocity_info *vptr);
-static void enable_flow_control_ability(struct velocity_info *vptr);
-static void enable_mii_autopoll(struct mac_regs __iomem *regs);
-static int velocity_mii_read(struct mac_regs __iomem *, u8 byIdx, u16 *pdata);
-static int velocity_mii_write(struct mac_regs __iomem *, u8 byMiiAddr, u16 data);
-static u32 mii_check_media_mode(struct mac_regs __iomem *regs);
-static u32 check_connection_type(struct mac_regs __iomem *regs);
-static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status);
-
 #ifdef CONFIG_PM
-
-static int velocity_suspend(struct pci_dev *pdev, pm_message_t state);
-static int velocity_resume(struct pci_dev *pdev);
-
 static DEFINE_SPINLOCK(velocity_dev_list_lock);
 static LIST_HEAD(velocity_dev_list);
-
-#endif
-
-#if defined(CONFIG_PM) && defined(CONFIG_INET)
-
-static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr);
-
-static struct notifier_block velocity_inetaddr_notifier = {
-	.notifier_call = velocity_netdev_event,
-};
-
-static void velocity_register_notifier(void)
-{
-	register_inetaddr_notifier(&velocity_inetaddr_notifier);
-}
-
-static void velocity_unregister_notifier(void)
-{
-	unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
-}
-
-#else
-
-#define velocity_register_notifier()	do {} while (0)
-#define velocity_unregister_notifier()	do {} while (0)
-
 #endif
 
 /*
  *	Internal board variants. At the moment we have only one
  */
-
 static struct velocity_info_tbl chip_info_table[] = {
 	{CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
 	{ }
@@ -448,7 +381,6 @@ static struct velocity_info_tbl chip_info_table[] = {
  *	Describe the PCI device identifiers that we support in this
  *	device driver. Used for hotplug autoloading.
  */
-
 static const struct pci_device_id velocity_id_table[] __devinitdata = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
 	{ }
@@ -463,7 +395,6 @@ MODULE_DEVICE_TABLE(pci, velocity_id_table);
  *	Given a chip identifier return a suitable description. Returns
  *	a pointer a static string valid while the driver is loaded.
  */
-
 static const char __devinit *get_chip_name(enum chip_type chip_id)
 {
 	int i;
@@ -481,7 +412,6 @@ static const char __devinit *get_chip_name(enum chip_type chip_id)
  *	unload for each active device that is present. Disconnects
  *	the device from the network layer and frees all the resources
  */
-
 static void __devexit velocity_remove1(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
@@ -519,7 +449,6 @@ static void __devexit velocity_remove1(struct pci_dev *pdev)
  *	all the verification and checking as well as reporting so that
  *	we don't duplicate code for each option.
  */
-
 static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max, int def, char *name, const char *devname)
 {
 	if (val == -1)
@@ -548,7 +477,6 @@ static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max,
  *	all the verification and checking as well as reporting so that
  *	we don't duplicate code for each option.
  */
-
 static void __devinit velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag, char *name, const char *devname)
 {
 	(*opt) &= (~flag);
@@ -574,7 +502,6 @@ static void __devinit velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag
  *	Turn the module and command options into a single structure
  *	for the current device
  */
-
 static void __devinit velocity_get_options(struct velocity_opt *opts, int index, const char *devname)
 {
 
@@ -600,7 +527,6 @@ static void __devinit velocity_get_options(struct velocity_opt *opts, int index,
  *	Initialize the content addressable memory used for filters. Load
  *	appropriately according to the presence of VLAN
  */
-
 static void velocity_init_cam_filter(struct velocity_info *vptr)
 {
 	struct mac_regs __iomem *regs = vptr->mac_regs;
@@ -673,7 +599,6 @@ static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
  *	Reset the ownership and status for the receive ring side.
  *	Hand all the receive queue to the NIC.
  */
-
 static void velocity_rx_reset(struct velocity_info *vptr)
 {
 
@@ -695,883 +620,846 @@ static void velocity_rx_reset(struct velocity_info *vptr)
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
- * velocity_init_registers - initialise MAC registers
|
|
|
- * @vptr: velocity to init
|
|
|
- * @type: type of initialisation (hot or cold)
|
|
|
+ * velocity_get_opt_media_mode - get media selection
|
|
|
+ * @vptr: velocity adapter
|
|
|
*
|
|
|
- * Initialise the MAC on a reset or on first set up on the
|
|
|
- * hardware.
|
|
|
+ * Get the media mode stored in EEPROM or module options and load
|
|
|
+ * mii_status accordingly. The requested link state information
|
|
|
+ * is also returned.
|
|
|
*/
|
|
|
-
|
|
|
-static void velocity_init_registers(struct velocity_info *vptr,
|
|
|
- enum velocity_init_type type)
|
|
|
+static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
|
|
|
{
|
|
|
- struct mac_regs __iomem *regs = vptr->mac_regs;
|
|
|
- int i, mii_status;
|
|
|
-
|
|
|
- mac_wol_reset(regs);
|
|
|
-
|
|
|
- switch (type) {
|
|
|
- case VELOCITY_INIT_RESET:
|
|
|
- case VELOCITY_INIT_WOL:
|
|
|
-
|
|
|
- netif_stop_queue(vptr->dev);
|
|
|
-
|
|
|
- /*
|
|
|
- * Reset RX to prevent RX pointer not on the 4X location
|
|
|
- */
|
|
|
- velocity_rx_reset(vptr);
|
|
|
- mac_rx_queue_run(regs);
|
|
|
- mac_rx_queue_wake(regs);
|
|
|
-
|
|
|
- mii_status = velocity_get_opt_media_mode(vptr);
|
|
|
- if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
|
|
|
- velocity_print_link_status(vptr);
|
|
|
- if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
|
|
|
- netif_wake_queue(vptr->dev);
|
|
|
- }
|
|
|
-
|
|
|
- enable_flow_control_ability(vptr);
|
|
|
-
|
|
|
- mac_clear_isr(regs);
|
|
|
- writel(CR0_STOP, ®s->CR0Clr);
|
|
|
- writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
|
|
|
- ®s->CR0Set);
|
|
|
+ u32 status = 0;
|
|
|
|
|
|
+ switch (vptr->options.spd_dpx) {
|
|
|
+ case SPD_DPX_AUTO:
|
|
|
+ status = VELOCITY_AUTONEG_ENABLE;
|
|
|
break;
|
|
|
+ case SPD_DPX_100_FULL:
|
|
|
+ status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
|
|
|
+ break;
|
|
|
+ case SPD_DPX_10_FULL:
|
|
|
+ status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
|
|
|
+ break;
|
|
|
+ case SPD_DPX_100_HALF:
|
|
|
+ status = VELOCITY_SPEED_100;
|
|
|
+ break;
|
|
|
+ case SPD_DPX_10_HALF:
|
|
|
+ status = VELOCITY_SPEED_10;
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ vptr->mii_status = status;
|
|
|
+ return status;
|
|
|
+}
|
|
|
|
|
|
- case VELOCITY_INIT_COLD:
|
|
|
- default:
|
|
|
- /*
|
|
|
- * Do reset
|
|
|
- */
|
|
|
- velocity_soft_reset(vptr);
|
|
|
- mdelay(5);
|
|
|
-
|
|
|
- mac_eeprom_reload(regs);
|
|
|
- for (i = 0; i < 6; i++)
|
|
|
- writeb(vptr->dev->dev_addr[i], &(regs->PAR[i]));
|
|
|
-
|
|
|
- /*
|
|
|
- * clear Pre_ACPI bit.
|
|
|
- */
|
|
|
- BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
|
|
|
- mac_set_rx_thresh(regs, vptr->options.rx_thresh);
|
|
|
- mac_set_dma_length(regs, vptr->options.DMA_length);
|
|
|
-
|
|
|
- writeb(WOLCFG_SAM | WOLCFG_SAB, ®s->WOLCFGSet);
|
|
|
- /*
|
|
|
- * Back off algorithm use original IEEE standard
|
|
|
- */
|
|
|
- BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), ®s->CFGB);
|
|
|
+/**
|
|
|
+ * safe_disable_mii_autopoll - autopoll off
|
|
|
+ * @regs: velocity registers
|
|
|
+ *
|
|
|
+ * Turn off the autopoll and wait for it to disable on the chip
|
|
|
+ */
|
|
|
+static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs)
|
|
|
+{
|
|
|
+ u16 ww;
|
|
|
|
|
|
- /*
|
|
|
- * Init CAM filter
|
|
|
- */
|
|
|
- velocity_init_cam_filter(vptr);
|
|
|
+ /* turn off MAUTO */
|
|
|
+ writeb(0, ®s->MIICR);
|
|
|
+ for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
|
|
|
+ udelay(1);
|
|
|
+ if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, ®s->MIISR))
|
|
|
+ break;
|
|
|
+ }
|
|
|
+}
|
|
|
|
|
|
- /*
|
|
|
- * Set packet filter: Receive directed and broadcast address
|
|
|
- */
|
|
|
- velocity_set_multi(vptr->dev);
|
|
|
+/**
|
|
|
+ * enable_mii_autopoll - turn on autopolling
|
|
|
+ * @regs: velocity registers
|
|
|
+ *
|
|
|
+ * Enable the MII link status autopoll feature on the Velocity
|
|
|
+ * hardware. Wait for it to enable.
|
|
|
+ */
|
|
|
+static void enable_mii_autopoll(struct mac_regs __iomem *regs)
|
|
|
+{
|
|
|
+ int ii;
|
|
|
|
|
|
- /*
|
|
|
- * Enable MII auto-polling
|
|
|
- */
|
|
|
- enable_mii_autopoll(regs);
|
|
|
+ writeb(0, &(regs->MIICR));
|
|
|
+ writeb(MIIADR_SWMPL, ®s->MIIADR);
|
|
|
|
|
|
- vptr->int_mask = INT_MASK_DEF;
|
|
|
+ for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
|
|
|
+ udelay(1);
|
|
|
+ if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, ®s->MIISR))
|
|
|
+ break;
|
|
|
+ }
|
|
|
|
|
|
- writel(vptr->rx.pool_dma, ®s->RDBaseLo);
|
|
|
- writew(vptr->options.numrx - 1, ®s->RDCSize);
|
|
|
- mac_rx_queue_run(regs);
|
|
|
- mac_rx_queue_wake(regs);
|
|
|
+ writeb(MIICR_MAUTO, ®s->MIICR);
|
|
|
|
|
|
- writew(vptr->options.numtx - 1, ®s->TDCSize);
|
|
|
+ for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
|
|
|
+ udelay(1);
|
|
|
+ if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, ®s->MIISR))
|
|
|
+ break;
|
|
|
+ }
|
|
|
|
|
|
- for (i = 0; i < vptr->tx.numq; i++) {
|
|
|
- writel(vptr->tx.pool_dma[i], ®s->TDBaseLo[i]);
|
|
|
- mac_tx_queue_run(regs, i);
|
|
|
- }
|
|
|
+}
|
|
|
|
|
|
- init_flow_control_register(vptr);
|
|
|
+/**
|
|
|
+ * velocity_mii_read - read MII data
|
|
|
+ * @regs: velocity registers
|
|
|
+ * @index: MII register index
|
|
|
+ * @data: buffer for received data
|
|
|
+ *
|
|
|
+ * Perform a single read of an MII 16bit register. Returns zero
|
|
|
+ * on success or -ETIMEDOUT if the PHY did not respond.
|
|
|
+ */
|
|
|
+static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
|
|
|
+{
|
|
|
+ u16 ww;
|
|
|
|
|
|
- writel(CR0_STOP, ®s->CR0Clr);
|
|
|
- writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), ®s->CR0Set);
|
|
|
+ /*
|
|
|
+ * Disable MIICR_MAUTO, so that mii addr can be set normally
|
|
|
+ */
|
|
|
+ safe_disable_mii_autopoll(regs);
|
|
|
|
|
|
- mii_status = velocity_get_opt_media_mode(vptr);
|
|
|
- netif_stop_queue(vptr->dev);
|
|
|
+ writeb(index, ®s->MIIADR);
|
|
|
|
|
|
- mii_init(vptr, mii_status);
|
|
|
+ BYTE_REG_BITS_ON(MIICR_RCMD, ®s->MIICR);
|
|
|
|
|
|
- if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
|
|
|
- velocity_print_link_status(vptr);
|
|
|
- if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
|
|
|
- netif_wake_queue(vptr->dev);
|
|
|
- }
|
|
|
+ for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
|
|
|
+ if (!(readb(®s->MIICR) & MIICR_RCMD))
|
|
|
+ break;
|
|
|
+ }
|
|
|
|
|
|
- enable_flow_control_ability(vptr);
|
|
|
- mac_hw_mibs_init(regs);
|
|
|
- mac_write_int_mask(vptr->int_mask, regs);
|
|
|
- mac_clear_isr(regs);
|
|
|
+ *data = readw(®s->MIIDATA);
|
|
|
|
|
|
- }
|
|
|
+ enable_mii_autopoll(regs);
|
|
|
+ if (ww == W_MAX_TIMEOUT)
|
|
|
+ return -ETIMEDOUT;
|
|
|
+ return 0;
|
|
|
}
|
|
|
|
|
|
+
|
|
|
/**
|
|
|
- * velocity_soft_reset - soft reset
|
|
|
- * @vptr: velocity to reset
|
|
|
+ * mii_check_media_mode - check media state
|
|
|
+ * @regs: velocity registers
|
|
|
*
|
|
|
- * Kick off a soft reset of the velocity adapter and then poll
|
|
|
- * until the reset sequence has completed before returning.
|
|
|
+ * Check the current MII status and determine the link status
|
|
|
+ * accordingly
|
|
|
*/
|
|
|
-
|
|
|
-static int velocity_soft_reset(struct velocity_info *vptr)
|
|
|
+static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
|
|
|
{
|
|
|
- struct mac_regs __iomem *regs = vptr->mac_regs;
|
|
|
- int i = 0;
|
|
|
+ u32 status = 0;
|
|
|
+ u16 ANAR;
|
|
|
|
|
|
- writel(CR0_SFRST, ®s->CR0Set);
|
|
|
+ if (!MII_REG_BITS_IS_ON(BMSR_LNK, MII_REG_BMSR, regs))
|
|
|
+ status |= VELOCITY_LINK_FAIL;
|
|
|
|
|
|
- for (i = 0; i < W_MAX_TIMEOUT; i++) {
|
|
|
- udelay(5);
|
|
|
- if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, ®s->CR0Set))
|
|
|
- break;
|
|
|
+ if (MII_REG_BITS_IS_ON(G1000CR_1000FD, MII_REG_G1000CR, regs))
|
|
|
+ status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
|
|
|
+ else if (MII_REG_BITS_IS_ON(G1000CR_1000, MII_REG_G1000CR, regs))
|
|
|
+ status |= (VELOCITY_SPEED_1000);
|
|
|
+ else {
|
|
|
+ velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
|
|
|
+ if (ANAR & ANAR_TXFD)
|
|
|
+ status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
|
|
|
+ else if (ANAR & ANAR_TX)
|
|
|
+ status |= VELOCITY_SPEED_100;
|
|
|
+ else if (ANAR & ANAR_10FD)
|
|
|
+ status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
|
|
|
+ else
|
|
|
+ status |= (VELOCITY_SPEED_10);
|
|
|
}
|
|
|
|
|
|
- if (i == W_MAX_TIMEOUT) {
|
|
|
- writel(CR0_FORSRST, ®s->CR0Set);
|
|
|
- /* FIXME: PCI POSTING */
|
|
|
- /* delay 2ms */
|
|
|
- mdelay(2);
|
|
|
+ if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
|
|
|
+ velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
|
|
|
+ if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
|
|
|
+ == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
|
|
|
+ if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
|
|
|
+ status |= VELOCITY_AUTONEG_ENABLE;
|
|
|
+ }
|
|
|
}
|
|
|
- return 0;
|
|
|
-}
|
|
|
|
|
|
-static const struct net_device_ops velocity_netdev_ops = {
|
|
|
- .ndo_open = velocity_open,
|
|
|
- .ndo_stop = velocity_close,
|
|
|
- .ndo_start_xmit = velocity_xmit,
|
|
|
- .ndo_get_stats = velocity_get_stats,
|
|
|
- .ndo_validate_addr = eth_validate_addr,
|
|
|
- .ndo_set_mac_address = eth_mac_addr,
|
|
|
- .ndo_set_multicast_list = velocity_set_multi,
|
|
|
- .ndo_change_mtu = velocity_change_mtu,
|
|
|
- .ndo_do_ioctl = velocity_ioctl,
|
|
|
- .ndo_vlan_rx_add_vid = velocity_vlan_rx_add_vid,
|
|
|
- .ndo_vlan_rx_kill_vid = velocity_vlan_rx_kill_vid,
|
|
|
- .ndo_vlan_rx_register = velocity_vlan_rx_register,
|
|
|
-};
|
|
|
+ return status;
|
|
|
+}
|
|
|
|
|
|
/**
|
|
|
- * velocity_found1 - set up discovered velocity card
|
|
|
- * @pdev: PCI device
|
|
|
- * @ent: PCI device table entry that matched
|
|
|
+ * velocity_mii_write - write MII data
|
|
|
+ * @regs: velocity registers
|
|
|
+ * @index: MII register index
|
|
|
+ * @data: 16bit data for the MII register
|
|
|
*
|
|
|
- * Configure a discovered adapter from scratch. Return a negative
|
|
|
- * errno error code on failure paths.
|
|
|
+ * Perform a single write to an MII 16bit register. Returns zero
|
|
|
+ * on success or -ETIMEDOUT if the PHY did not respond.
|
|
|
*/
|
|
|
-
|
|
|
-static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|
|
+static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
|
|
|
{
|
|
|
- static int first = 1;
|
|
|
- struct net_device *dev;
|
|
|
- int i;
|
|
|
- const char *drv_string;
|
|
|
- const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data];
|
|
|
- struct velocity_info *vptr;
|
|
|
- struct mac_regs __iomem *regs;
|
|
|
- int ret = -ENOMEM;
|
|
|
+ u16 ww;
|
|
|
|
|
|
- /* FIXME: this driver, like almost all other ethernet drivers,
|
|
|
- * can support more than MAX_UNITS.
|
|
|
+ /*
|
|
|
+ * Disable MIICR_MAUTO, so that mii addr can be set normally
|
|
|
*/
|
|
|
- if (velocity_nics >= MAX_UNITS) {
|
|
|
- dev_notice(&pdev->dev, "already found %d NICs.\n",
|
|
|
- velocity_nics);
|
|
|
- return -ENODEV;
|
|
|
- }
|
|
|
+ safe_disable_mii_autopoll(regs);
|
|
|
|
|
|
- dev = alloc_etherdev(sizeof(struct velocity_info));
|
|
|
- if (!dev) {
|
|
|
- dev_err(&pdev->dev, "allocate net device failed.\n");
|
|
|
- goto out;
|
|
|
+ /* MII reg offset */
|
|
|
+ writeb(mii_addr, ®s->MIIADR);
|
|
|
+ /* set MII data */
|
|
|
+ writew(data, ®s->MIIDATA);
|
|
|
+
|
|
|
+ /* turn on MIICR_WCMD */
|
|
|
+ BYTE_REG_BITS_ON(MIICR_WCMD, ®s->MIICR);
|
|
|
+
|
|
|
+ /* W_MAX_TIMEOUT is the timeout period */
|
|
|
+ for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
|
|
|
+ udelay(5);
|
|
|
+ if (!(readb(®s->MIICR) & MIICR_WCMD))
|
|
|
+ break;
|
|
|
}
|
|
|
+ enable_mii_autopoll(regs);
|
|
|
|
|
|
- /* Chain it all together */
|
|
|
+ if (ww == W_MAX_TIMEOUT)
|
|
|
+ return -ETIMEDOUT;
|
|
|
+ return 0;
|
|
|
+}
|
|
|
|
|
|
- SET_NETDEV_DEV(dev, &pdev->dev);
|
|
|
- vptr = netdev_priv(dev);
|
|
|
+/**
|
|
|
+ * set_mii_flow_control - flow control setup
|
|
|
+ * @vptr: velocity interface
|
|
|
+ *
|
|
|
+ * Set up the flow control on this interface according to
|
|
|
+ * the supplied user/eeprom options.
|
|
|
+ */
|
|
|
+static void set_mii_flow_control(struct velocity_info *vptr)
|
|
|
+{
|
|
|
+ /*Enable or Disable PAUSE in ANAR */
|
|
|
+ switch (vptr->options.flow_cntl) {
|
|
|
+ case FLOW_CNTL_TX:
|
|
|
+ MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
|
|
|
+ MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
|
|
|
+ break;
|
|
|
|
|
|
+ case FLOW_CNTL_RX:
|
|
|
+ MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
|
|
|
+ MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
|
|
|
+ break;
|
|
|
|
|
|
- if (first) {
|
|
|
- printk(KERN_INFO "%s Ver. %s\n",
|
|
|
- VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
|
|
|
- printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
|
|
|
- printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n");
|
|
|
- first = 0;
|
|
|
- }
|
|
|
+ case FLOW_CNTL_TX_RX:
|
|
|
+ MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
|
|
|
+ MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
|
|
|
+ break;
|
|
|
|
|
|
- velocity_init_info(pdev, vptr, info);
|
|
|
+ case FLOW_CNTL_DISABLE:
|
|
|
+ MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
|
|
|
+ MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
|
|
|
+ break;
|
|
|
+ default:
|
|
|
+ break;
|
|
|
+ }
|
|
|
+}
|
|
|
|
|
|
- vptr->dev = dev;
|
|
|
+/**
|
|
|
+ * mii_set_auto_on - autonegotiate on
|
|
|
+ * @vptr: velocity
|
|
|
+ *
|
|
|
+ * Enable autonegotation on this interface
|
|
|
+ */
|
|
|
+static void mii_set_auto_on(struct velocity_info *vptr)
|
|
|
+{
|
|
|
+ if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs))
|
|
|
+ MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
|
|
|
+ else
|
|
|
+ MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs);
|
|
|
+}
|
|
|
|
|
|
- dev->irq = pdev->irq;
|
|
|
+static u32 check_connection_type(struct mac_regs __iomem *regs)
|
|
|
+{
|
|
|
+ u32 status = 0;
|
|
|
+ u8 PHYSR0;
|
|
|
+ u16 ANAR;
|
|
|
+ PHYSR0 = readb(®s->PHYSR0);
|
|
|
|
|
|
- ret = pci_enable_device(pdev);
|
|
|
- if (ret < 0)
|
|
|
- goto err_free_dev;
|
|
|
+ /*
|
|
|
+ if (!(PHYSR0 & PHYSR0_LINKGD))
|
|
|
+ status|=VELOCITY_LINK_FAIL;
|
|
|
+ */
|
|
|
|
|
|
- ret = velocity_get_pci_info(vptr, pdev);
|
|
|
- if (ret < 0) {
|
|
|
- /* error message already printed */
|
|
|
- goto err_disable;
|
|
|
- }
|
|
|
+ if (PHYSR0 & PHYSR0_FDPX)
|
|
|
+ status |= VELOCITY_DUPLEX_FULL;
|
|
|
|
|
|
- ret = pci_request_regions(pdev, VELOCITY_NAME);
|
|
|
- if (ret < 0) {
|
|
|
- dev_err(&pdev->dev, "No PCI resources.\n");
|
|
|
- goto err_disable;
|
|
|
- }
|
|
|
+ if (PHYSR0 & PHYSR0_SPDG)
|
|
|
+ status |= VELOCITY_SPEED_1000;
|
|
|
+ else if (PHYSR0 & PHYSR0_SPD10)
|
|
|
+ status |= VELOCITY_SPEED_10;
|
|
|
+ else
|
|
|
+ status |= VELOCITY_SPEED_100;
|
|
|
|
|
|
- regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
|
|
|
- if (regs == NULL) {
|
|
|
- ret = -EIO;
|
|
|
- goto err_release_res;
|
|
|
+ if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
|
|
|
+ velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
|
|
|
+ if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
|
|
|
+ == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
|
|
|
+ if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
|
|
|
+ status |= VELOCITY_AUTONEG_ENABLE;
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
- vptr->mac_regs = regs;
|
|
|
-
|
|
|
- mac_wol_reset(regs);
|
|
|
+ return status;
|
|
|
+}
|
|
|
|
|
|
- dev->base_addr = vptr->ioaddr;
|
|
|
|
|
|
- for (i = 0; i < 6; i++)
|
|
|
- dev->dev_addr[i] = readb(®s->PAR[i]);
|
|
|
|
|
|
+/**
|
|
|
+ * velocity_set_media_mode - set media mode
|
|
|
+ * @mii_status: old MII link state
|
|
|
+ *
|
|
|
+ * Check the media link state and configure the flow control
|
|
|
+ * PHY and also velocity hardware setup accordingly. In particular
|
|
|
+ * we need to set up CD polling and frame bursting.
|
|
|
+ */
|
|
|
+static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
|
|
|
+{
|
|
|
+ u32 curr_status;
|
|
|
+ struct mac_regs __iomem *regs = vptr->mac_regs;
|
|
|
|
|
|
- drv_string = dev_driver_string(&pdev->dev);
|
|
|
+ vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
|
|
|
+ curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);
|
|
|
|
|
|
- velocity_get_options(&vptr->options, velocity_nics, drv_string);
|
|
|
+ /* Set mii link status */
|
|
|
+ set_mii_flow_control(vptr);
|
|
|
|
|
|
/*
|
|
|
- * Mask out the options cannot be set to the chip
|
|
|
+ Check if new status is consisent with current status
|
|
|
+ if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE)
|
|
|
+ || (mii_status==curr_status)) {
|
|
|
+ vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
|
|
|
+ vptr->mii_status=check_connection_type(vptr->mac_regs);
|
|
|
+ VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n");
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
*/
|
|
|
|
|
|
- vptr->options.flags &= info->flags;
|
|
|
+ if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
|
|
|
+ MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);
|
|
|
|
|
|
/*
|
|
|
- * Enable the chip specified capbilities
|
|
|
+ * If connection type is AUTO
|
|
|
*/
|
|
|
+ if (mii_status & VELOCITY_AUTONEG_ENABLE) {
|
|
|
+ VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity is AUTO mode\n");
|
|
|
+ /* clear force MAC mode bit */
|
|
|
+ BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, ®s->CHIPGCR);
|
|
|
+ /* set duplex mode of MAC according to duplex mode of MII */
|
|
|
+ MII_REG_BITS_ON(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10, MII_REG_ANAR, vptr->mac_regs);
|
|
|
+ MII_REG_BITS_ON(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
|
|
|
+ MII_REG_BITS_ON(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs);
|
|
|
|
|
|
- vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
|
|
|
+ /* enable AUTO-NEGO mode */
|
|
|
+ mii_set_auto_on(vptr);
|
|
|
+ } else {
|
|
|
+ u16 ANAR;
|
|
|
+ u8 CHIPGCR;
|
|
|
|
|
|
- vptr->wol_opts = vptr->options.wol_opts;
|
|
|
- vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
|
|
|
+ /*
|
|
|
+ * 1. if it's 3119, disable frame bursting in halfduplex mode
|
|
|
+ * and enable it in fullduplex mode
|
|
|
+ * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
|
|
|
+ * 3. only enable CD heart beat counter in 10HD mode
|
|
|
+ */
|
|
|
|
|
|
- vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
|
|
|
+ /* set force MAC mode bit */
|
|
|
+ BYTE_REG_BITS_ON(CHIPGCR_FCMODE, ®s->CHIPGCR);
|
|
|
|
|
|
- dev->irq = pdev->irq;
|
|
|
- dev->netdev_ops = &velocity_netdev_ops;
|
|
|
- dev->ethtool_ops = &velocity_ethtool_ops;
|
|
|
+ CHIPGCR = readb(®s->CHIPGCR);
|
|
|
+ CHIPGCR &= ~CHIPGCR_FCGMII;
|
|
|
|
|
|
- dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
|
|
|
- NETIF_F_HW_VLAN_RX;
|
|
|
+ if (mii_status & VELOCITY_DUPLEX_FULL) {
|
|
|
+ CHIPGCR |= CHIPGCR_FCFDX;
|
|
|
+ writeb(CHIPGCR, ®s->CHIPGCR);
|
|
|
+ VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced full mode\n");
|
|
|
+ if (vptr->rev_id < REV_ID_VT3216_A0)
|
|
|
+ BYTE_REG_BITS_OFF(TCR_TB2BDIS, ®s->TCR);
|
|
|
+ } else {
|
|
|
+ CHIPGCR &= ~CHIPGCR_FCFDX;
|
|
|
+ VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced half mode\n");
|
|
|
+ writeb(CHIPGCR, ®s->CHIPGCR);
|
|
|
+ if (vptr->rev_id < REV_ID_VT3216_A0)
|
|
|
+ BYTE_REG_BITS_ON(TCR_TB2BDIS, ®s->TCR);
|
|
|
+ }
|
|
|
|
|
|
- if (vptr->flags & VELOCITY_FLAGS_TX_CSUM)
|
|
|
- dev->features |= NETIF_F_IP_CSUM;
|
|
|
+ MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
|
|
|
|
|
|
- ret = register_netdev(dev);
|
|
|
- if (ret < 0)
|
|
|
- goto err_iounmap;
|
|
|
+ if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
|
|
|
+ BYTE_REG_BITS_OFF(TESTCFG_HBDIS, ®s->TESTCFG);
|
|
|
+ else
|
|
|
+ BYTE_REG_BITS_ON(TESTCFG_HBDIS, ®s->TESTCFG);
|
|
|
|
|
|
- if (!velocity_get_link(dev)) {
|
|
|
- netif_carrier_off(dev);
|
|
|
- vptr->mii_status |= VELOCITY_LINK_FAIL;
|
|
|
+ /* MII_REG_BITS_OFF(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); */
|
|
|
+ velocity_mii_read(vptr->mac_regs, MII_REG_ANAR, &ANAR);
|
|
|
+ ANAR &= (~(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10));
|
|
|
+ if (mii_status & VELOCITY_SPEED_100) {
|
|
|
+ if (mii_status & VELOCITY_DUPLEX_FULL)
|
|
|
+ ANAR |= ANAR_TXFD;
|
|
|
+ else
|
|
|
+ ANAR |= ANAR_TX;
|
|
|
+ } else {
|
|
|
+ if (mii_status & VELOCITY_DUPLEX_FULL)
|
|
|
+ ANAR |= ANAR_10FD;
|
|
|
+ else
|
|
|
+ ANAR |= ANAR_10;
|
|
|
+ }
|
|
|
+ velocity_mii_write(vptr->mac_regs, MII_REG_ANAR, ANAR);
|
|
|
+ /* enable AUTO-NEGO mode */
|
|
|
+ mii_set_auto_on(vptr);
|
|
|
+ /* MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); */
|
|
|
}
|
|
|
+ /* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
|
|
|
+ /* vptr->mii_status=check_connection_type(vptr->mac_regs); */
|
|
|
+ return VELOCITY_LINK_CHANGE;
|
|
|
+}
|
|
|
|
|
|
- velocity_print_info(vptr);
|
|
|
- pci_set_drvdata(pdev, dev);
|
|
|
+/**
|
|
|
+ * velocity_print_link_status - link status reporting
|
|
|
+ * @vptr: velocity to report on
|
|
|
+ *
|
|
|
+ * Turn the link status of the velocity card into a kernel log
|
|
|
+ * description of the new link state, detailing speed and duplex
|
|
|
+ * status
|
|
|
+ */
|
|
|
+static void velocity_print_link_status(struct velocity_info *vptr)
|
|
|
+{
|
|
|
|
|
|
- /* and leave the chip powered down */
|
|
|
+ if (vptr->mii_status & VELOCITY_LINK_FAIL) {
|
|
|
+ VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name);
|
|
|
+ } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
|
|
|
+ VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name);
|
|
|
|
|
|
- pci_set_power_state(pdev, PCI_D3hot);
|
|
|
-#ifdef CONFIG_PM
|
|
|
- {
|
|
|
- unsigned long flags;
|
|
|
+ if (vptr->mii_status & VELOCITY_SPEED_1000)
|
|
|
+ VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps");
|
|
|
+ else if (vptr->mii_status & VELOCITY_SPEED_100)
|
|
|
+ VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps");
|
|
|
+ else
|
|
|
+ VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps");
|
|
|
|
|
|
- spin_lock_irqsave(&velocity_dev_list_lock, flags);
|
|
|
- list_add(&vptr->list, &velocity_dev_list);
|
|
|
- spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
|
|
|
+ if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
|
|
|
+ VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n");
|
|
|
+ else
|
|
|
+ VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n");
|
|
|
+ } else {
|
|
|
+ VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name);
|
|
|
+ switch (vptr->options.spd_dpx) {
|
|
|
+ case SPD_DPX_100_HALF:
|
|
|
+ VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n");
|
|
|
+ break;
|
|
|
+ case SPD_DPX_100_FULL:
|
|
|
+ VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps full duplex\n");
|
|
|
+ break;
|
|
|
+ case SPD_DPX_10_HALF:
|
|
|
+ VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps half duplex\n");
|
|
|
+ break;
|
|
|
+ case SPD_DPX_10_FULL:
|
|
|
+ VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps full duplex\n");
|
|
|
+ break;
|
|
|
+ default:
|
|
|
+ break;
|
|
|
+ }
|
|
|
}
|
|
|
-#endif
|
|
|
- velocity_nics++;
|
|
|
-out:
|
|
|
- return ret;
|
|
|
-
|
|
|
-err_iounmap:
|
|
|
- iounmap(regs);
|
|
|
-err_release_res:
|
|
|
- pci_release_regions(pdev);
|
|
|
-err_disable:
|
|
|
- pci_disable_device(pdev);
|
|
|
-err_free_dev:
|
|
|
- free_netdev(dev);
|
|
|
- goto out;
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
- * velocity_print_info - per driver data
|
|
|
- * @vptr: velocity
|
|
|
+ * enable_flow_control_ability - flow control
|
|
|
+ * @vptr: veloity to configure
|
|
|
*
|
|
|
- * Print per driver data as the kernel driver finds Velocity
|
|
|
- * hardware
|
|
|
+ * Set up flow control according to the flow control options
|
|
|
+ * determined by the eeprom/configuration.
|
|
|
*/
|
|
|
-
|
|
|
-static void __devinit velocity_print_info(struct velocity_info *vptr)
|
|
|
+static void enable_flow_control_ability(struct velocity_info *vptr)
|
|
|
{
|
|
|
- struct net_device *dev = vptr->dev;
|
|
|
|
|
|
- printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
|
|
|
- printk(KERN_INFO "%s: Ethernet Address: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
|
|
|
- dev->name,
|
|
|
- dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
|
|
|
- dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
|
|
|
-}
|
|
|
+ struct mac_regs __iomem *regs = vptr->mac_regs;
|
|
|
|
|
|
-/**
|
|
|
- * velocity_init_info - init private data
|
|
|
- * @pdev: PCI device
|
|
|
- * @vptr: Velocity info
|
|
|
- * @info: Board type
|
|
|
- *
|
|
|
- * Set up the initial velocity_info struct for the device that has been
|
|
|
- * discovered.
|
|
|
- */
|
|
|
+ switch (vptr->options.flow_cntl) {
|
|
|
|
|
|
-static void __devinit velocity_init_info(struct pci_dev *pdev,
|
|
|
- struct velocity_info *vptr,
|
|
|
- const struct velocity_info_tbl *info)
|
|
|
-{
|
|
|
- memset(vptr, 0, sizeof(struct velocity_info));
|
|
|
+ case FLOW_CNTL_DEFAULT:
|
|
|
+ if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, ®s->PHYSR0))
|
|
|
+ writel(CR0_FDXRFCEN, ®s->CR0Set);
|
|
|
+ else
|
|
|
+ writel(CR0_FDXRFCEN, ®s->CR0Clr);
|
|
|
+
|
|
|
+ if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, ®s->PHYSR0))
|
|
|
+ writel(CR0_FDXTFCEN, ®s->CR0Set);
|
|
|
+ else
|
|
|
+ writel(CR0_FDXTFCEN, ®s->CR0Clr);
|
|
|
+ break;
|
|
|
+
|
|
|
+ case FLOW_CNTL_TX:
|
|
|
+ writel(CR0_FDXTFCEN, ®s->CR0Set);
|
|
|
+ writel(CR0_FDXRFCEN, ®s->CR0Clr);
|
|
|
+ break;
|
|
|
+
|
|
|
+ case FLOW_CNTL_RX:
|
|
|
+ writel(CR0_FDXRFCEN, ®s->CR0Set);
|
|
|
+ writel(CR0_FDXTFCEN, ®s->CR0Clr);
|
|
|
+ break;
|
|
|
+
|
|
|
+ case FLOW_CNTL_TX_RX:
|
|
|
+ writel(CR0_FDXTFCEN, ®s->CR0Set);
|
|
|
+ writel(CR0_FDXRFCEN, ®s->CR0Set);
|
|
|
+ break;
|
|
|
+
|
|
|
+ case FLOW_CNTL_DISABLE:
|
|
|
+ writel(CR0_FDXRFCEN, ®s->CR0Clr);
|
|
|
+ writel(CR0_FDXTFCEN, ®s->CR0Clr);
|
|
|
+ break;
|
|
|
+
|
|
|
+ default:
|
|
|
+ break;
|
|
|
+ }
|
|
|
|
|
|
- vptr->pdev = pdev;
|
|
|
- vptr->chip_id = info->chip_id;
|
|
|
- vptr->tx.numq = info->txqueue;
|
|
|
- vptr->multicast_limit = MCAM_SIZE;
|
|
|
- spin_lock_init(&vptr->lock);
|
|
|
- INIT_LIST_HEAD(&vptr->list);
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
- * velocity_get_pci_info - retrieve PCI info for device
|
|
|
- * @vptr: velocity device
|
|
|
- * @pdev: PCI device it matches
|
|
|
+ * velocity_soft_reset - soft reset
|
|
|
+ * @vptr: velocity to reset
|
|
|
*
|
|
|
- * Retrieve the PCI configuration space data that interests us from
|
|
|
- * the kernel PCI layer
|
|
|
+ * Kick off a soft reset of the velocity adapter and then poll
|
|
|
+ * until the reset sequence has completed before returning.
|
|
|
*/
|
|
|
-
|
|
|
-static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev)
|
|
|
+static int velocity_soft_reset(struct velocity_info *vptr)
|
|
|
{
|
|
|
- vptr->rev_id = pdev->revision;
|
|
|
-
|
|
|
- pci_set_master(pdev);
|
|
|
-
|
|
|
- vptr->ioaddr = pci_resource_start(pdev, 0);
|
|
|
- vptr->memaddr = pci_resource_start(pdev, 1);
|
|
|
+ struct mac_regs __iomem *regs = vptr->mac_regs;
|
|
|
+ int i = 0;
|
|
|
|
|
|
- if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
|
|
|
- dev_err(&pdev->dev,
|
|
|
- "region #0 is not an I/O resource, aborting.\n");
|
|
|
- return -EINVAL;
|
|
|
- }
|
|
|
+ writel(CR0_SFRST, ®s->CR0Set);
|
|
|
|
|
|
- if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
|
|
|
- dev_err(&pdev->dev,
|
|
|
- "region #1 is an I/O resource, aborting.\n");
|
|
|
- return -EINVAL;
|
|
|
+ for (i = 0; i < W_MAX_TIMEOUT; i++) {
|
|
|
+ udelay(5);
|
|
|
+ if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, ®s->CR0Set))
|
|
|
+ break;
|
|
|
}
|
|
|
|
|
|
- if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) {
|
|
|
- dev_err(&pdev->dev, "region #1 is too small.\n");
|
|
|
- return -EINVAL;
|
|
|
+ if (i == W_MAX_TIMEOUT) {
|
|
|
+ writel(CR0_FORSRST, ®s->CR0Set);
|
|
|
+ /* FIXME: PCI POSTING */
|
|
|
+ /* delay 2ms */
|
|
|
+ mdelay(2);
|
|
|
}
|
|
|
- vptr->pdev = pdev;
|
|
|
-
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
- * velocity_init_dma_rings - set up DMA rings
|
|
|
- * @vptr: Velocity to set up
|
|
|
+ * velocity_set_multi - filter list change callback
|
|
|
+ * @dev: network device
|
|
|
*
|
|
|
- * Allocate PCI mapped DMA rings for the receive and transmit layer
|
|
|
- * to use.
|
|
|
- */
|
|
|
-
|
|
|
-static int velocity_init_dma_rings(struct velocity_info *vptr)
|
|
|
-{
|
|
|
- struct velocity_opt *opt = &vptr->options;
|
|
|
- const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
|
|
|
- const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
|
|
|
- struct pci_dev *pdev = vptr->pdev;
|
|
|
- dma_addr_t pool_dma;
|
|
|
- void *pool;
|
|
|
- unsigned int i;
|
|
|
-
|
|
|
- /*
|
|
|
- * Allocate all RD/TD rings a single pool.
|
|
|
- *
|
|
|
- * pci_alloc_consistent() fulfills the requirement for 64 bytes
|
|
|
- * alignment
|
|
|
- */
|
|
|
- pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq +
|
|
|
- rx_ring_size, &pool_dma);
|
|
|
- if (!pool) {
|
|
|
- dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
|
|
|
- vptr->dev->name);
|
|
|
- return -ENOMEM;
|
|
|
- }
|
|
|
-
|
|
|
- vptr->rx.ring = pool;
|
|
|
- vptr->rx.pool_dma = pool_dma;
|
|
|
-
|
|
|
- pool += rx_ring_size;
|
|
|
- pool_dma += rx_ring_size;
|
|
|
-
|
|
|
- for (i = 0; i < vptr->tx.numq; i++) {
|
|
|
- vptr->tx.rings[i] = pool;
|
|
|
- vptr->tx.pool_dma[i] = pool_dma;
|
|
|
- pool += tx_ring_size;
|
|
|
- pool_dma += tx_ring_size;
|
|
|
- }
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-/**
|
|
|
- * velocity_free_dma_rings - free PCI ring pointers
|
|
|
- * @vptr: Velocity to free from
|
|
|
- *
|
|
|
- * Clean up the PCI ring buffers allocated to this velocity.
|
|
|
+ * Called by the network layer when the filter lists need to change
|
|
|
+ * for a velocity adapter. Reload the CAMs with the new address
|
|
|
+ * filter ruleset.
|
|
|
*/
|
|
|
-
|
|
|
-static void velocity_free_dma_rings(struct velocity_info *vptr)
|
|
|
-{
|
|
|
- const int size = vptr->options.numrx * sizeof(struct rx_desc) +
|
|
|
- vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
|
|
|
-
|
|
|
- pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
|
|
|
-}
|
|
|
-
|
|
|
-static void velocity_give_many_rx_descs(struct velocity_info *vptr)
|
|
|
+static void velocity_set_multi(struct net_device *dev)
|
|
|
{
|
|
|
+ struct velocity_info *vptr = netdev_priv(dev);
|
|
|
struct mac_regs __iomem *regs = vptr->mac_regs;
|
|
|
- int avail, dirty, unusable;
|
|
|
-
|
|
|
- /*
|
|
|
- * RD number must be equal to 4X per hardware spec
|
|
|
- * (programming guide rev 1.20, p.13)
|
|
|
- */
|
|
|
- if (vptr->rx.filled < 4)
|
|
|
- return;
|
|
|
-
|
|
|
- wmb();
|
|
|
-
|
|
|
- unusable = vptr->rx.filled & 0x0003;
|
|
|
- dirty = vptr->rx.dirty - unusable;
|
|
|
- for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
|
|
|
- dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
|
|
|
- vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
|
|
|
- }
|
|
|
-
|
|
|
- writew(vptr->rx.filled & 0xfffc, ®s->RBRDU);
|
|
|
- vptr->rx.filled = unusable;
|
|
|
-}
|
|
|
-
|
|
|
-static int velocity_rx_refill(struct velocity_info *vptr)
|
|
|
-{
|
|
|
- int dirty = vptr->rx.dirty, done = 0;
|
|
|
-
|
|
|
- do {
|
|
|
- struct rx_desc *rd = vptr->rx.ring + dirty;
|
|
|
+ u8 rx_mode;
|
|
|
+ int i;
|
|
|
+ struct dev_mc_list *mclist;
|
|
|
|
|
|
- /* Fine for an all zero Rx desc at init time as well */
|
|
|
- if (rd->rdesc0.len & OWNED_BY_NIC)
|
|
|
- break;
|
|
|
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
|
|
|
+ writel(0xffffffff, ®s->MARCAM[0]);
|
|
|
+ writel(0xffffffff, ®s->MARCAM[4]);
|
|
|
+ rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
|
|
|
+ } else if ((dev->mc_count > vptr->multicast_limit)
|
|
|
+ || (dev->flags & IFF_ALLMULTI)) {
|
|
|
+ writel(0xffffffff, ®s->MARCAM[0]);
|
|
|
+ writel(0xffffffff, ®s->MARCAM[4]);
|
|
|
+ rx_mode = (RCR_AM | RCR_AB);
|
|
|
+ } else {
|
|
|
+ int offset = MCAM_SIZE - vptr->multicast_limit;
|
|
|
+ mac_get_cam_mask(regs, vptr->mCAMmask);
|
|
|
|
|
|
- if (!vptr->rx.info[dirty].skb) {
|
|
|
- if (velocity_alloc_rx_buf(vptr, dirty) < 0)
|
|
|
- break;
|
|
|
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; i++, mclist = mclist->next) {
|
|
|
+ mac_set_cam(regs, i + offset, mclist->dmi_addr);
|
|
|
+ vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
|
|
|
}
|
|
|
- done++;
|
|
|
- dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
|
|
|
- } while (dirty != vptr->rx.curr);
|
|
|
|
|
|
- if (done) {
|
|
|
- vptr->rx.dirty = dirty;
|
|
|
- vptr->rx.filled += done;
|
|
|
+ mac_set_cam_mask(regs, vptr->mCAMmask);
|
|
|
+ rx_mode = RCR_AM | RCR_AB | RCR_AP;
|
|
|
}
|
|
|
+ if (dev->mtu > 1500)
|
|
|
+ rx_mode |= RCR_AL;
|
|
|
|
|
|
- return done;
|
|
|
-}
|
|
|
+ BYTE_REG_BITS_ON(rx_mode, ®s->RCR);
|
|
|
|
|
|
-static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
|
|
|
-{
|
|
|
- vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
|
|
|
}
|
|
|
|
|
|
-/**
|
|
|
- * velocity_init_rd_ring - set up receive ring
|
|
|
- * @vptr: velocity to configure
|
|
|
- *
|
|
|
- * Allocate and set up the receive buffers for each ring slot and
|
|
|
- * assign them to the network adapter.
|
|
|
+/*
|
|
|
+ * MII access , media link mode setting functions
|
|
|
*/
|
|
|
|
|
|
-static int velocity_init_rd_ring(struct velocity_info *vptr)
|
|
|
-{
|
|
|
- int ret = -ENOMEM;
|
|
|
-
|
|
|
- vptr->rx.info = kcalloc(vptr->options.numrx,
|
|
|
- sizeof(struct velocity_rd_info), GFP_KERNEL);
|
|
|
- if (!vptr->rx.info)
|
|
|
- goto out;
|
|
|
-
|
|
|
- velocity_init_rx_ring_indexes(vptr);
|
|
|
-
|
|
|
- if (velocity_rx_refill(vptr) != vptr->options.numrx) {
|
|
|
- VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
|
|
|
- "%s: failed to allocate RX buffer.\n", vptr->dev->name);
|
|
|
- velocity_free_rd_ring(vptr);
|
|
|
- goto out;
|
|
|
- }
|
|
|
-
|
|
|
- ret = 0;
|
|
|
-out:
|
|
|
- return ret;
|
|
|
-}
|
|
|
-
|
|
|
/**
|
|
|
- * velocity_free_rd_ring - free receive ring
|
|
|
- * @vptr: velocity to clean up
|
|
|
+ * mii_init - set up MII
|
|
|
+ * @vptr: velocity adapter
|
|
|
+ * @mii_status: links tatus
|
|
|
*
|
|
|
- * Free the receive buffers for each ring slot and any
|
|
|
- * attached socket buffers that need to go away.
|
|
|
+ * Set up the PHY for the current link state.
|
|
|
*/
|
|
|
-
|
|
|
-static void velocity_free_rd_ring(struct velocity_info *vptr)
|
|
|
+static void mii_init(struct velocity_info *vptr, u32 mii_status)
|
|
|
{
|
|
|
- int i;
|
|
|
-
|
|
|
- if (vptr->rx.info == NULL)
|
|
|
- return;
|
|
|
-
|
|
|
- for (i = 0; i < vptr->options.numrx; i++) {
|
|
|
- struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
|
|
|
- struct rx_desc *rd = vptr->rx.ring + i;
|
|
|
-
|
|
|
- memset(rd, 0, sizeof(*rd));
|
|
|
+ u16 BMCR;
|
|
|
|
|
|
- if (!rd_info->skb)
|
|
|
- continue;
|
|
|
- pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
|
|
|
- PCI_DMA_FROMDEVICE);
|
|
|
- rd_info->skb_dma = 0;
|
|
|
+ switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
|
|
|
+ case PHYID_CICADA_CS8201:
|
|
|
+ /*
|
|
|
+ * Reset to hardware default
|
|
|
+ */
|
|
|
+ MII_REG_BITS_OFF((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
|
|
|
+ /*
|
|
|
+ * Turn on ECHODIS bit in NWay-forced full mode and turn it
|
|
|
+ * off it in NWay-forced half mode for NWay-forced v.s.
|
|
|
+ * legacy-forced issue.
|
|
|
+ */
|
|
|
+ if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
|
|
|
+ MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
|
|
|
+ else
|
|
|
+ MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
|
|
|
+ /*
|
|
|
+ * Turn on Link/Activity LED enable bit for CIS8201
|
|
|
+ */
|
|
|
+ MII_REG_BITS_ON(PLED_LALBE, MII_REG_PLED, vptr->mac_regs);
|
|
|
+ break;
|
|
|
+ case PHYID_VT3216_32BIT:
|
|
|
+ case PHYID_VT3216_64BIT:
|
|
|
+ /*
|
|
|
+ * Reset to hardware default
|
|
|
+ */
|
|
|
+ MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
|
|
|
+ /*
|
|
|
+ * Turn on ECHODIS bit in NWay-forced full mode and turn it
|
|
|
+ * off it in NWay-forced half mode for NWay-forced v.s.
|
|
|
+ * legacy-forced issue
|
|
|
+ */
|
|
|
+ if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
|
|
|
+ MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
|
|
|
+ else
|
|
|
+ MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
|
|
|
+ break;
|
|
|
|
|
|
- dev_kfree_skb(rd_info->skb);
|
|
|
- rd_info->skb = NULL;
|
|
|
+ case PHYID_MARVELL_1000:
|
|
|
+ case PHYID_MARVELL_1000S:
|
|
|
+ /*
|
|
|
+ * Assert CRS on Transmit
|
|
|
+ */
|
|
|
+ MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
|
|
|
+ /*
|
|
|
+ * Reset to hardware default
|
|
|
+ */
|
|
|
+ MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
|
|
|
+ break;
|
|
|
+ default:
|
|
|
+ ;
|
|
|
+ }
|
|
|
+ velocity_mii_read(vptr->mac_regs, MII_REG_BMCR, &BMCR);
|
|
|
+ if (BMCR & BMCR_ISO) {
|
|
|
+ BMCR &= ~BMCR_ISO;
|
|
|
+ velocity_mii_write(vptr->mac_regs, MII_REG_BMCR, BMCR);
|
|
|
}
|
|
|
-
|
|
|
- kfree(vptr->rx.info);
|
|
|
- vptr->rx.info = NULL;
|
|
|
}
|
|
|
|
|
|
+
|
|
|
/**
|
|
|
- * velocity_init_td_ring - set up transmit ring
|
|
|
- * @vptr: velocity
|
|
|
+ * velocity_init_registers - initialise MAC registers
|
|
|
+ * @vptr: velocity to init
|
|
|
+ * @type: type of initialisation (hot or cold)
|
|
|
*
|
|
|
- * Set up the transmit ring and chain the ring pointers together.
|
|
|
- * Returns zero on success or a negative posix errno code for
|
|
|
- * failure.
|
|
|
+ * Initialise the MAC on a reset or on first set up on the
|
|
|
+ * hardware.
|
|
|
*/
|
|
|
-
|
|
|
-static int velocity_init_td_ring(struct velocity_info *vptr)
|
|
|
+static void velocity_init_registers(struct velocity_info *vptr,
|
|
|
+ enum velocity_init_type type)
|
|
|
{
|
|
|
- dma_addr_t curr;
|
|
|
- int j;
|
|
|
-
|
|
|
- /* Init the TD ring entries */
|
|
|
- for (j = 0; j < vptr->tx.numq; j++) {
|
|
|
- curr = vptr->tx.pool_dma[j];
|
|
|
-
|
|
|
- vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
|
|
|
- sizeof(struct velocity_td_info),
|
|
|
- GFP_KERNEL);
|
|
|
- if (!vptr->tx.infos[j]) {
|
|
|
- while (--j >= 0)
|
|
|
- kfree(vptr->tx.infos[j]);
|
|
|
- return -ENOMEM;
|
|
|
- }
|
|
|
+ struct mac_regs __iomem *regs = vptr->mac_regs;
|
|
|
+ int i, mii_status;
|
|
|
|
|
|
- vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
|
|
|
- }
|
|
|
- return 0;
|
|
|
-}
|
|
|
+ mac_wol_reset(regs);
|
|
|
|
|
|
-/*
|
|
|
- * FIXME: could we merge this with velocity_free_tx_buf ?
|
|
|
- */
|
|
|
+ switch (type) {
|
|
|
+ case VELOCITY_INIT_RESET:
|
|
|
+ case VELOCITY_INIT_WOL:
|
|
|
|
|
|
-static void velocity_free_td_ring_entry(struct velocity_info *vptr,
|
|
|
- int q, int n)
|
|
|
-{
|
|
|
- struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]);
|
|
|
- int i;
|
|
|
+ netif_stop_queue(vptr->dev);
|
|
|
|
|
|
- if (td_info == NULL)
|
|
|
- return;
|
|
|
+ /*
|
|
|
+ * Reset RX to prevent RX pointer not on the 4X location
|
|
|
+ */
|
|
|
+ velocity_rx_reset(vptr);
|
|
|
+ mac_rx_queue_run(regs);
|
|
|
+ mac_rx_queue_wake(regs);
|
|
|
|
|
|
- if (td_info->skb) {
|
|
|
- for (i = 0; i < td_info->nskb_dma; i++) {
|
|
|
- if (td_info->skb_dma[i]) {
|
|
|
- pci_unmap_single(vptr->pdev, td_info->skb_dma[i],
|
|
|
- td_info->skb->len, PCI_DMA_TODEVICE);
|
|
|
- td_info->skb_dma[i] = 0;
|
|
|
- }
|
|
|
+ mii_status = velocity_get_opt_media_mode(vptr);
|
|
|
+ if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
|
|
|
+ velocity_print_link_status(vptr);
|
|
|
+ if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
|
|
|
+ netif_wake_queue(vptr->dev);
|
|
|
}
|
|
|
- dev_kfree_skb(td_info->skb);
|
|
|
- td_info->skb = NULL;
|
|
|
- }
|
|
|
-}
|
|
|
-
|
|
|
-/**
|
|
|
- * velocity_free_td_ring - free td ring
|
|
|
- * @vptr: velocity
|
|
|
- *
|
|
|
- * Free up the transmit ring for this particular velocity adapter.
|
|
|
- * We free the ring contents but not the ring itself.
|
|
|
- */
|
|
|
-
|
|
|
-static void velocity_free_td_ring(struct velocity_info *vptr)
|
|
|
-{
|
|
|
- int i, j;
|
|
|
-
|
|
|
- for (j = 0; j < vptr->tx.numq; j++) {
|
|
|
- if (vptr->tx.infos[j] == NULL)
|
|
|
- continue;
|
|
|
- for (i = 0; i < vptr->options.numtx; i++)
|
|
|
- velocity_free_td_ring_entry(vptr, j, i);
|
|
|
|
|
|
- kfree(vptr->tx.infos[j]);
|
|
|
- vptr->tx.infos[j] = NULL;
|
|
|
- }
|
|
|
-}
|
|
|
+ enable_flow_control_ability(vptr);
|
|
|
|
|
|
-/**
|
|
|
- * velocity_rx_srv - service RX interrupt
|
|
|
- * @vptr: velocity
|
|
|
- * @status: adapter status (unused)
|
|
|
- *
|
|
|
- * Walk the receive ring of the velocity adapter and remove
|
|
|
- * any received packets from the receive queue. Hand the ring
|
|
|
- * slots back to the adapter for reuse.
|
|
|
- */
|
|
|
+ mac_clear_isr(regs);
|
|
|
+ writel(CR0_STOP, ®s->CR0Clr);
|
|
|
+ writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
|
|
|
+ ®s->CR0Set);
|
|
|
|
|
|
-static int velocity_rx_srv(struct velocity_info *vptr, int status)
|
|
|
-{
|
|
|
- struct net_device_stats *stats = &vptr->dev->stats;
|
|
|
- int rd_curr = vptr->rx.curr;
|
|
|
- int works = 0;
|
|
|
+ break;
|
|
|
|
|
|
- do {
|
|
|
- struct rx_desc *rd = vptr->rx.ring + rd_curr;
|
|
|
+ case VELOCITY_INIT_COLD:
|
|
|
+ default:
|
|
|
+ /*
|
|
|
+ * Do reset
|
|
|
+ */
|
|
|
+ velocity_soft_reset(vptr);
|
|
|
+ mdelay(5);
|
|
|
|
|
|
- if (!vptr->rx.info[rd_curr].skb)
|
|
|
- break;
|
|
|
+ mac_eeprom_reload(regs);
|
|
|
+ for (i = 0; i < 6; i++)
|
|
|
+ writeb(vptr->dev->dev_addr[i], &(regs->PAR[i]));
|
|
|
|
|
|
- if (rd->rdesc0.len & OWNED_BY_NIC)
|
|
|
- break;
|
|
|
+ /*
|
|
|
+ * clear Pre_ACPI bit.
|
|
|
+ */
|
|
|
+ BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
|
|
|
+ mac_set_rx_thresh(regs, vptr->options.rx_thresh);
|
|
|
+ mac_set_dma_length(regs, vptr->options.DMA_length);
|
|
|
|
|
|
- rmb();
|
|
|
+ writeb(WOLCFG_SAM | WOLCFG_SAB, ®s->WOLCFGSet);
|
|
|
+ /*
|
|
|
+ * Back off algorithm use original IEEE standard
|
|
|
+ */
|
|
|
+ BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), ®s->CFGB);
|
|
|
|
|
|
/*
|
|
|
- * Don't drop CE or RL error frame although RXOK is off
|
|
|
+ * Init CAM filter
|
|
|
*/
|
|
|
- if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
|
|
|
- if (velocity_receive_frame(vptr, rd_curr) < 0)
|
|
|
- stats->rx_dropped++;
|
|
|
- } else {
|
|
|
- if (rd->rdesc0.RSR & RSR_CRC)
|
|
|
- stats->rx_crc_errors++;
|
|
|
- if (rd->rdesc0.RSR & RSR_FAE)
|
|
|
- stats->rx_frame_errors++;
|
|
|
+ velocity_init_cam_filter(vptr);
|
|
|
|
|
|
- stats->rx_dropped++;
|
|
|
- }
|
|
|
+ /*
|
|
|
+ * Set packet filter: Receive directed and broadcast address
|
|
|
+ */
|
|
|
+ velocity_set_multi(vptr->dev);
|
|
|
|
|
|
- rd->size |= RX_INTEN;
|
|
|
+ /*
|
|
|
+ * Enable MII auto-polling
|
|
|
+ */
|
|
|
+ enable_mii_autopoll(regs);
|
|
|
|
|
|
- rd_curr++;
|
|
|
- if (rd_curr >= vptr->options.numrx)
|
|
|
- rd_curr = 0;
|
|
|
- } while (++works <= 15);
|
|
|
+ vptr->int_mask = INT_MASK_DEF;
|
|
|
|
|
|
- vptr->rx.curr = rd_curr;
|
|
|
+ writel(vptr->rx.pool_dma, ®s->RDBaseLo);
|
|
|
+ writew(vptr->options.numrx - 1, ®s->RDCSize);
|
|
|
+ mac_rx_queue_run(regs);
|
|
|
+ mac_rx_queue_wake(regs);
|
|
|
|
|
|
- if ((works > 0) && (velocity_rx_refill(vptr) > 0))
|
|
|
- velocity_give_many_rx_descs(vptr);
|
|
|
+ writew(vptr->options.numtx - 1, ®s->TDCSize);
|
|
|
|
|
|
- VAR_USED(stats);
|
|
|
- return works;
|
|
|
-}
|
|
|
+ for (i = 0; i < vptr->tx.numq; i++) {
|
|
|
+ writel(vptr->tx.pool_dma[i], ®s->TDBaseLo[i]);
|
|
|
+ mac_tx_queue_run(regs, i);
|
|
|
+ }
|
|
|
|
|
|
-/**
|
|
|
- * velocity_rx_csum - checksum process
|
|
|
- * @rd: receive packet descriptor
|
|
|
- * @skb: network layer packet buffer
|
|
|
- *
|
|
|
- * Process the status bits for the received packet and determine
|
|
|
- * if the checksum was computed and verified by the hardware
|
|
|
- */
|
|
|
+ init_flow_control_register(vptr);
|
|
|
|
|
|
-static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
|
|
|
-{
|
|
|
- skb->ip_summed = CHECKSUM_NONE;
|
|
|
+ writel(CR0_STOP, ®s->CR0Clr);
|
|
|
+ writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), ®s->CR0Set);
|
|
|
|
|
|
- if (rd->rdesc1.CSM & CSM_IPKT) {
|
|
|
- if (rd->rdesc1.CSM & CSM_IPOK) {
|
|
|
- if ((rd->rdesc1.CSM & CSM_TCPKT) ||
|
|
|
- (rd->rdesc1.CSM & CSM_UDPKT)) {
|
|
|
- if (!(rd->rdesc1.CSM & CSM_TUPOK))
|
|
|
- return;
|
|
|
- }
|
|
|
- skb->ip_summed = CHECKSUM_UNNECESSARY;
|
|
|
- }
|
|
|
- }
|
|
|
-}
|
|
|
+ mii_status = velocity_get_opt_media_mode(vptr);
|
|
|
+ netif_stop_queue(vptr->dev);
|
|
|
|
|
|
-/**
|
|
|
- * velocity_rx_copy - in place Rx copy for small packets
|
|
|
- * @rx_skb: network layer packet buffer candidate
|
|
|
- * @pkt_size: received data size
|
|
|
- * @rd: receive packet descriptor
|
|
|
- * @dev: network device
|
|
|
- *
|
|
|
- * Replace the current skb that is scheduled for Rx processing by a
|
|
|
- * shorter, immediatly allocated skb, if the received packet is small
|
|
|
- * enough. This function returns a negative value if the received
|
|
|
- * packet is too big or if memory is exhausted.
|
|
|
- */
|
|
|
-static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
|
|
|
- struct velocity_info *vptr)
|
|
|
-{
|
|
|
- int ret = -1;
|
|
|
- if (pkt_size < rx_copybreak) {
|
|
|
- struct sk_buff *new_skb;
|
|
|
+ mii_init(vptr, mii_status);
|
|
|
|
|
|
- new_skb = netdev_alloc_skb(vptr->dev, pkt_size + 2);
|
|
|
- if (new_skb) {
|
|
|
- new_skb->ip_summed = rx_skb[0]->ip_summed;
|
|
|
- skb_reserve(new_skb, 2);
|
|
|
- skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
|
|
|
- *rx_skb = new_skb;
|
|
|
- ret = 0;
|
|
|
+ if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
|
|
|
+ velocity_print_link_status(vptr);
|
|
|
+ if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
|
|
|
+ netif_wake_queue(vptr->dev);
|
|
|
}
|
|
|
|
|
|
+ enable_flow_control_ability(vptr);
|
|
|
+ mac_hw_mibs_init(regs);
|
|
|
+ mac_write_int_mask(vptr->int_mask, regs);
|
|
|
+ mac_clear_isr(regs);
|
|
|
+
|
|
|
}
|
|
|
- return ret;
|
|
|
}
|
|
|
|
|
|
-/**
|
|
|
- * velocity_iph_realign - IP header alignment
|
|
|
- * @vptr: velocity we are handling
|
|
|
- * @skb: network layer packet buffer
|
|
|
- * @pkt_size: received data size
|
|
|
- *
|
|
|
- * Align IP header on a 2 bytes boundary. This behavior can be
|
|
|
- * configured by the user.
|
|
|
- */
|
|
|
-static inline void velocity_iph_realign(struct velocity_info *vptr,
|
|
|
- struct sk_buff *skb, int pkt_size)
|
|
|
+static void velocity_give_many_rx_descs(struct velocity_info *vptr)
|
|
|
{
|
|
|
- if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
|
|
|
- memmove(skb->data + 2, skb->data, pkt_size);
|
|
|
- skb_reserve(skb, 2);
|
|
|
+ struct mac_regs __iomem *regs = vptr->mac_regs;
|
|
|
+ int avail, dirty, unusable;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * RD number must be equal to 4X per hardware spec
|
|
|
+ * (programming guide rev 1.20, p.13)
|
|
|
+ */
|
|
|
+ if (vptr->rx.filled < 4)
|
|
|
+ return;
|
|
|
+
|
|
|
+ wmb();
|
|
|
+
|
|
|
+ unusable = vptr->rx.filled & 0x0003;
|
|
|
+ dirty = vptr->rx.dirty - unusable;
|
|
|
+ for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
|
|
|
+ dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
|
|
|
+ vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
|
|
|
}
|
|
|
+
|
|
|
+ writew(vptr->rx.filled & 0xfffc, ®s->RBRDU);
|
|
|
+ vptr->rx.filled = unusable;
|
|
|
}

 /**
- * velocity_receive_frame - received packet processor
- * @vptr: velocity we are handling
- * @idx: ring index
+ * velocity_init_dma_rings - set up DMA rings
+ * @vptr: Velocity to set up
  *
- * A packet has arrived. We process the packet and if appropriate
- * pass the frame up the network stack
+ * Allocate PCI mapped DMA rings for the receive and transmit layer
+ * to use.
  */
-
-static int velocity_receive_frame(struct velocity_info *vptr, int idx)
+static int velocity_init_dma_rings(struct velocity_info *vptr)
 {
-	void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
-	struct net_device_stats *stats = &vptr->dev->stats;
-	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
-	struct rx_desc *rd = &(vptr->rx.ring[idx]);
-	int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
-	struct sk_buff *skb;
-
-	if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
-		VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame span multple RDs.\n", vptr->dev->name);
-		stats->rx_length_errors++;
-		return -EINVAL;
-	}
-
-	if (rd->rdesc0.RSR & RSR_MAR)
-		stats->multicast++;
-
-	skb = rd_info->skb;
-
-	pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
-				    vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
+	struct velocity_opt *opt = &vptr->options;
+	const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
+	const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
+	struct pci_dev *pdev = vptr->pdev;
+	dma_addr_t pool_dma;
+	void *pool;
+	unsigned int i;

 	/*
-	 * Drop frame not meeting IEEE 802.3
+	 * Allocate all RD/TD rings in a single pool.
+	 *
+	 * pci_alloc_consistent() fulfills the requirement for 64 bytes
+	 * alignment
 	 */
-
-	if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
-		if (rd->rdesc0.RSR & RSR_RL) {
-			stats->rx_length_errors++;
-			return -EINVAL;
-		}
-	}
-
-	pci_action = pci_dma_sync_single_for_device;
-
-	velocity_rx_csum(rd, skb);
-
-	if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
-		velocity_iph_realign(vptr, skb, pkt_len);
-		pci_action = pci_unmap_single;
-		rd_info->skb = NULL;
+	pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq +
+				    rx_ring_size, &pool_dma);
+	if (!pool) {
+		dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
+			vptr->dev->name);
+		return -ENOMEM;
 	}

-	pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
-		   PCI_DMA_FROMDEVICE);
-
-	skb_put(skb, pkt_len - 4);
-	skb->protocol = eth_type_trans(skb, vptr->dev);
+	vptr->rx.ring = pool;
+	vptr->rx.pool_dma = pool_dma;

-	if (vptr->vlgrp && (rd->rdesc0.RSR & RSR_DETAG)) {
-		vlan_hwaccel_rx(skb, vptr->vlgrp,
-				swab16(le16_to_cpu(rd->rdesc1.PQTAG)));
-	} else
-		netif_rx(skb);
+	pool += rx_ring_size;
+	pool_dma += rx_ring_size;

-	stats->rx_bytes += pkt_len;
+	for (i = 0; i < vptr->tx.numq; i++) {
+		vptr->tx.rings[i] = pool;
+		vptr->tx.pool_dma[i] = pool_dma;
+		pool += tx_ring_size;
+		pool_dma += tx_ring_size;
+	}

 	return 0;
 }

+static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
+{
+	vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
+}
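
For readers following the allocation above: velocity_init_dma_rings carves one pci_alloc_consistent() area into the RX ring followed by one TX ring per queue, advancing the CPU pointer and the DMA address in step. A minimal userspace model of that carve-up (the sizes and queue count are invented for illustration):

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		const size_t rx_ring_size = 64 * 16;	/* stands in for numrx * sizeof(struct rx_desc) */
		const size_t tx_ring_size = 64 * 16;	/* stands in for numtx * sizeof(struct tx_desc) */
		const int numq = 2;			/* assumed TX queue count */
		char *tx_rings[2];
		char *base, *pool;
		int i;

		base = pool = malloc(rx_ring_size + tx_ring_size * numq);
		if (!base)
			return 1;
		pool += rx_ring_size;			/* RX ring occupies the front */
		for (i = 0; i < numq; i++) {
			tx_rings[i] = pool;
			pool += tx_ring_size;
		}
		printf("rx at %p, tx[0] at %p, tx[1] at %p\n",
		       (void *)base, (void *)tx_rings[0], (void *)tx_rings[1]);
		free(base);
		return 0;
	}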
+

 /**
  * velocity_alloc_rx_buf - allocate aligned receive buffer
  * @vptr: velocity
@@ -1582,7 +1470,6 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
  * requires *64* byte alignment of the buffer which makes life
  * less fun than would be ideal.
  */
-
 static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 {
 	struct rx_desc *rd = &(vptr->rx.ring[idx]);
@@ -1611,243 +1498,146 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 	return 0;
 }

-/**
- * tx_srv - transmit interrupt service
- * @vptr; Velocity
- * @status:
- *
- * Scan the queues looking for transmitted packets that
- * we can complete and clean up. Update any statistics as
- * necessary/
- */
-
-static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
+static int velocity_rx_refill(struct velocity_info *vptr)
 {
-	struct tx_desc *td;
-	int qnum;
-	int full = 0;
-	int idx;
-	int works = 0;
-	struct velocity_td_info *tdinfo;
-	struct net_device_stats *stats = &vptr->dev->stats;
-
-	for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
-		for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
-			idx = (idx + 1) % vptr->options.numtx) {
+	int dirty = vptr->rx.dirty, done = 0;

-			/*
-			 * Get Tx Descriptor
-			 */
-			td = &(vptr->tx.rings[qnum][idx]);
-			tdinfo = &(vptr->tx.infos[qnum][idx]);
+	do {
+		struct rx_desc *rd = vptr->rx.ring + dirty;

-			if (td->tdesc0.len & OWNED_BY_NIC)
-				break;
+		/* Fine for an all zero Rx desc at init time as well */
+		if (rd->rdesc0.len & OWNED_BY_NIC)
+			break;

-			if ((works++ > 15))
+		if (!vptr->rx.info[dirty].skb) {
+			if (velocity_alloc_rx_buf(vptr, dirty) < 0)
 				break;
-
-			if (td->tdesc0.TSR & TSR0_TERR) {
-				stats->tx_errors++;
-				stats->tx_dropped++;
-				if (td->tdesc0.TSR & TSR0_CDH)
-					stats->tx_heartbeat_errors++;
-				if (td->tdesc0.TSR & TSR0_CRS)
-					stats->tx_carrier_errors++;
-				if (td->tdesc0.TSR & TSR0_ABT)
-					stats->tx_aborted_errors++;
-				if (td->tdesc0.TSR & TSR0_OWC)
-					stats->tx_window_errors++;
-			} else {
-				stats->tx_packets++;
-				stats->tx_bytes += tdinfo->skb->len;
-			}
-			velocity_free_tx_buf(vptr, tdinfo);
-			vptr->tx.used[qnum]--;
 		}
-		vptr->tx.tail[qnum] = idx;
+		done++;
+		dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
+	} while (dirty != vptr->rx.curr);

-		if (AVAIL_TD(vptr, qnum) < 1)
-			full = 1;
-	}
-	/*
-	 * Look to see if we should kick the transmit network
-	 * layer for more work.
-	 */
-	if (netif_queue_stopped(vptr->dev) && (full == 0)
-	    && (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
-		netif_wake_queue(vptr->dev);
+	if (done) {
+		vptr->rx.dirty = dirty;
+		vptr->rx.filled += done;
 	}
-	return works;
+
+	return done;
 }
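
A note on the loop shape above: velocity_rx_refill advances the dirty index around the circular ring until it meets curr, stopping early at a descriptor the NIC still owns or on a failed buffer allocation, and reports how many slots it refilled. The wrap-around step in isolation (ring size and indices are arbitrary example values):

	#include <stdio.h>

	int main(void)
	{
		const int numrx = 8;	/* assumed ring size */
		int curr = 2, dirty = 5, done = 0;

		do {
			/* the driver would stop here on OWNED_BY_NIC or a
			   failed velocity_alloc_rx_buf() */
			done++;
			dirty = (dirty < numrx - 1) ? dirty + 1 : 0;
		} while (dirty != curr);

		printf("refilled %d slots, dirty now %d\n", done, dirty);
		return 0;
	}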

 /**
- * velocity_print_link_status - link status reporting
- * @vptr: velocity to report on
+ * velocity_free_rd_ring - free receive ring
+ * @vptr: velocity to clean up
  *
- * Turn the link status of the velocity card into a kernel log
- * description of the new link state, detailing speed and duplex
- * status
+ * Free the receive buffers for each ring slot and any
+ * attached socket buffers that need to go away.
  */
-
-static void velocity_print_link_status(struct velocity_info *vptr)
+static void velocity_free_rd_ring(struct velocity_info *vptr)
 {
+	int i;

-	if (vptr->mii_status & VELOCITY_LINK_FAIL) {
-		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name);
-	} else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
-		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name);
-
-		if (vptr->mii_status & VELOCITY_SPEED_1000)
-			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps");
-		else if (vptr->mii_status & VELOCITY_SPEED_100)
-			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps");
-		else
-			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps");
-
-		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
-			VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n");
-		else
-			VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n");
-	} else {
-		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name);
-		switch (vptr->options.spd_dpx) {
-		case SPD_DPX_100_HALF:
-			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n");
-			break;
-		case SPD_DPX_100_FULL:
-			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps full duplex\n");
-			break;
-		case SPD_DPX_10_HALF:
-			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps half duplex\n");
-			break;
-		case SPD_DPX_10_FULL:
-			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps full duplex\n");
-			break;
-		default:
-			break;
-		}
-	}
-}
-
-/**
- * velocity_error - handle error from controller
- * @vptr: velocity
- * @status: card status
- *
- * Process an error report from the hardware and attempt to recover
- * the card itself. At the moment we cannot recover from some
- * theoretically impossible errors but this could be fixed using
- * the pci_device_failed logic to bounce the hardware
- *
- */
+	if (vptr->rx.info == NULL)
+		return;

-static void velocity_error(struct velocity_info *vptr, int status)
-{
+	for (i = 0; i < vptr->options.numrx; i++) {
+		struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
+		struct rx_desc *rd = vptr->rx.ring + i;

-	if (status & ISR_TXSTLI) {
-		struct mac_regs __iomem *regs = vptr->mac_regs;
+		memset(rd, 0, sizeof(*rd));

-		printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0]));
-		BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
-		writew(TRDCSR_RUN, &regs->TDCSRClr);
-		netif_stop_queue(vptr->dev);
+		if (!rd_info->skb)
+			continue;
+		pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
+				 PCI_DMA_FROMDEVICE);
+		rd_info->skb_dma = 0;

-		/* FIXME: port over the pci_device_failed code and use it
-		   here */
+		dev_kfree_skb(rd_info->skb);
+		rd_info->skb = NULL;
 	}

-	if (status & ISR_SRCI) {
-		struct mac_regs __iomem *regs = vptr->mac_regs;
-		int linked;
-
-		if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
-			vptr->mii_status = check_connection_type(regs);
+	kfree(vptr->rx.info);
+	vptr->rx.info = NULL;
+}

-			/*
-			 * If it is a 3119, disable frame bursting in
-			 * halfduplex mode and enable it in fullduplex
-			 * mode
-			 */
-			if (vptr->rev_id < REV_ID_VT3216_A0) {
-				if (vptr->mii_status | VELOCITY_DUPLEX_FULL)
-					BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
-				else
-					BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
-			}
-			/*
-			 * Only enable CD heart beat counter in 10HD mode
-			 */
-			if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10))
-				BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
-			else
-				BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
-		}
-		/*
-		 * Get link status from PHYSR0
-		 */
-		linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD;

-		if (linked) {
-			vptr->mii_status &= ~VELOCITY_LINK_FAIL;
-			netif_carrier_on(vptr->dev);
-		} else {
-			vptr->mii_status |= VELOCITY_LINK_FAIL;
-			netif_carrier_off(vptr->dev);
-		}

-		velocity_print_link_status(vptr);
-		enable_flow_control_ability(vptr);
+/**
+ * velocity_init_rd_ring - set up receive ring
+ * @vptr: velocity to configure
+ *
+ * Allocate and set up the receive buffers for each ring slot and
+ * assign them to the network adapter.
+ */
+static int velocity_init_rd_ring(struct velocity_info *vptr)
+{
+	int ret = -ENOMEM;

-		/*
-		 * Re-enable auto-polling because SRCI will disable
-		 * auto-polling
-		 */
+	vptr->rx.info = kcalloc(vptr->options.numrx,
+				sizeof(struct velocity_rd_info), GFP_KERNEL);
+	if (!vptr->rx.info)
+		goto out;

-		enable_mii_autopoll(regs);
+	velocity_init_rx_ring_indexes(vptr);

-		if (vptr->mii_status & VELOCITY_LINK_FAIL)
-			netif_stop_queue(vptr->dev);
-		else
-			netif_wake_queue(vptr->dev);
+	if (velocity_rx_refill(vptr) != vptr->options.numrx) {
+		VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
+			     "%s: failed to allocate RX buffer.\n", vptr->dev->name);
+		velocity_free_rd_ring(vptr);
+		goto out;
+	}

-	};
-	if (status & ISR_MIBFI)
-		velocity_update_hw_mibs(vptr);
-	if (status & ISR_LSTEI)
-		mac_rx_queue_wake(vptr->mac_regs);
+	ret = 0;
+out:
+	return ret;
 }

 /**
- * velocity_free_tx_buf - free transmit buffer
- * @vptr: velocity
- * @tdinfo: buffer
+ * velocity_init_td_ring - set up transmit ring
+ * @vptr: velocity
  *
- * Release an transmit buffer. If the buffer was preallocated then
- * recycle it, if not then unmap the buffer.
+ * Set up the transmit ring and chain the ring pointers together.
+ * Returns zero on success or a negative posix errno code for
+ * failure.
  */
-
-static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *tdinfo)
+static int velocity_init_td_ring(struct velocity_info *vptr)
 {
-	struct sk_buff *skb = tdinfo->skb;
-	int i;
-	int pktlen;
+	dma_addr_t curr;
+	int j;

-	/*
-	 * Don't unmap the pre-allocated tx_bufs
-	 */
-	if (tdinfo->skb_dma) {
+	/* Init the TD ring entries */
+	for (j = 0; j < vptr->tx.numq; j++) {
+		curr = vptr->tx.pool_dma[j];

-		pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
-		for (i = 0; i < tdinfo->nskb_dma; i++) {
-			pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], pktlen, PCI_DMA_TODEVICE);
-			tdinfo->skb_dma[i] = 0;
+		vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
+					    sizeof(struct velocity_td_info),
+					    GFP_KERNEL);
+		if (!vptr->tx.infos[j]) {
+			while (--j >= 0)
+				kfree(vptr->tx.infos[j]);
+			return -ENOMEM;
 		}
+
+		vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
 	}
-	dev_kfree_skb_irq(skb);
-	tdinfo->skb = NULL;
+	return 0;
+}
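
The failure path above uses a standard unwind idiom: when the j-th kcalloc() fails, "while (--j >= 0)" walks back over the entries that did succeed and frees them, so the caller never sees a half-built ring. The idiom in isolation, outside the kernel:

	#include <stdlib.h>

	/* Allocate n slots or none: on the first failure, free every
	   slot that was already allocated and report an error. */
	static int alloc_all(void **slots, int n, size_t size)
	{
		int j;

		for (j = 0; j < n; j++) {
			slots[j] = calloc(1, size);
			if (!slots[j]) {
				while (--j >= 0)
					free(slots[j]);
				return -1;	/* -ENOMEM in the driver */
			}
		}
		return 0;
	}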
+
+/**
+ * velocity_free_dma_rings - free PCI ring pointers
+ * @vptr: Velocity to free from
+ *
+ * Clean up the PCI ring buffers allocated to this velocity.
+ */
+static void velocity_free_dma_rings(struct velocity_info *vptr)
+{
+	const int size = vptr->options.numrx * sizeof(struct rx_desc) +
+		vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
+
+	pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
 }

+
 static int velocity_init_rings(struct velocity_info *vptr, int mtu)
 {
 	int ret;
@@ -1875,1138 +1665,979 @@ err_free_dma_rings_0:
 	goto out;
 }

-static void velocity_free_rings(struct velocity_info *vptr)
-{
-	velocity_free_td_ring(vptr);
-	velocity_free_rd_ring(vptr);
-	velocity_free_dma_rings(vptr);
-}
-
 /**
- * velocity_open - interface activation callback
- * @dev: network layer device to open
- *
- * Called when the network layer brings the interface up. Returns
- * a negative posix error code on failure, or zero on success.
+ * velocity_free_tx_buf - free transmit buffer
+ * @vptr: velocity
+ * @tdinfo: buffer
  *
- * All the ring allocation and set up is done on open for this
- * adapter to minimise memory usage when inactive
+ * Release a transmit buffer. If the buffer was preallocated then
+ * recycle it, if not then unmap the buffer.
  */
-
-static int velocity_open(struct net_device *dev)
+static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *tdinfo)
 {
-	struct velocity_info *vptr = netdev_priv(dev);
-	int ret;
-
-	ret = velocity_init_rings(vptr, dev->mtu);
-	if (ret < 0)
-		goto out;
-
-	/* Ensure chip is running */
-	pci_set_power_state(vptr->pdev, PCI_D0);
-
-	velocity_give_many_rx_descs(vptr);
+	struct sk_buff *skb = tdinfo->skb;
+	int i;
+	int pktlen;

-	velocity_init_registers(vptr, VELOCITY_INIT_COLD);
+	/*
+	 * Don't unmap the pre-allocated tx_bufs
+	 */
+	if (tdinfo->skb_dma) {

-	ret = request_irq(vptr->pdev->irq, &velocity_intr, IRQF_SHARED,
-			  dev->name, dev);
-	if (ret < 0) {
-		/* Power down the chip */
-		pci_set_power_state(vptr->pdev, PCI_D3hot);
-		velocity_free_rings(vptr);
-		goto out;
+		pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
+		for (i = 0; i < tdinfo->nskb_dma; i++) {
+			pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], pktlen, PCI_DMA_TODEVICE);
+			tdinfo->skb_dma[i] = 0;
+		}
 	}
-
-	mac_enable_int(vptr->mac_regs);
-	netif_start_queue(dev);
-	vptr->flags |= VELOCITY_FLAGS_OPENED;
-out:
-	return ret;
+	dev_kfree_skb_irq(skb);
+	tdinfo->skb = NULL;
 }

-/**
- * velocity_change_mtu - MTU change callback
- * @dev: network device
- * @new_mtu: desired MTU
- *
- * Handle requests from the networking layer for MTU change on
- * this interface. It gets called on a change by the network layer.
- * Return zero for success or negative posix error code.
- */

-static int velocity_change_mtu(struct net_device *dev, int new_mtu)
+/*
+ * FIXME: could we merge this with velocity_free_tx_buf ?
+ */
+static void velocity_free_td_ring_entry(struct velocity_info *vptr,
+					int q, int n)
 {
-	struct velocity_info *vptr = netdev_priv(dev);
-	int ret = 0;
-
-	if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) {
-		VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
-				vptr->dev->name);
-		ret = -EINVAL;
-		goto out_0;
-	}
-
-	if (!netif_running(dev)) {
-		dev->mtu = new_mtu;
-		goto out_0;
-	}
+	struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]);
+	int i;

-	if (dev->mtu != new_mtu) {
-		struct velocity_info *tmp_vptr;
-		unsigned long flags;
-		struct rx_info rx;
-		struct tx_info tx;
+	if (td_info == NULL)
+		return;

-		tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
-		if (!tmp_vptr) {
-			ret = -ENOMEM;
-			goto out_0;
+	if (td_info->skb) {
+		for (i = 0; i < td_info->nskb_dma; i++) {
+			if (td_info->skb_dma[i]) {
+				pci_unmap_single(vptr->pdev, td_info->skb_dma[i],
+						 td_info->skb->len, PCI_DMA_TODEVICE);
+				td_info->skb_dma[i] = 0;
+			}
 		}
-
-		tmp_vptr->dev = dev;
-		tmp_vptr->pdev = vptr->pdev;
-		tmp_vptr->options = vptr->options;
-		tmp_vptr->tx.numq = vptr->tx.numq;
-
-		ret = velocity_init_rings(tmp_vptr, new_mtu);
-		if (ret < 0)
-			goto out_free_tmp_vptr_1;
-
-		spin_lock_irqsave(&vptr->lock, flags);
-
-		netif_stop_queue(dev);
-		velocity_shutdown(vptr);
-
-		rx = vptr->rx;
-		tx = vptr->tx;
-
-		vptr->rx = tmp_vptr->rx;
-		vptr->tx = tmp_vptr->tx;
-
-		tmp_vptr->rx = rx;
-		tmp_vptr->tx = tx;
-
-		dev->mtu = new_mtu;
-
-		velocity_give_many_rx_descs(vptr);
-
-		velocity_init_registers(vptr, VELOCITY_INIT_COLD);
-
-		mac_enable_int(vptr->mac_regs);
-		netif_start_queue(dev);
-
-		spin_unlock_irqrestore(&vptr->lock, flags);
-
-		velocity_free_rings(tmp_vptr);
-
-out_free_tmp_vptr_1:
-		kfree(tmp_vptr);
+		dev_kfree_skb(td_info->skb);
+		td_info->skb = NULL;
 	}
-out_0:
-	return ret;
-}
-
-/**
- * velocity_shutdown - shut down the chip
- * @vptr: velocity to deactivate
- *
- * Shuts down the internal operations of the velocity and
- * disables interrupts, autopolling, transmit and receive
- */
-
-static void velocity_shutdown(struct velocity_info *vptr)
-{
-	struct mac_regs __iomem *regs = vptr->mac_regs;
-	mac_disable_int(regs);
-	writel(CR0_STOP, &regs->CR0Set);
-	writew(0xFFFF, &regs->TDCSRClr);
-	writeb(0xFF, &regs->RDCSRClr);
-	safe_disable_mii_autopoll(regs);
-	mac_clear_isr(regs);
 }

 /**
- * velocity_close - close adapter callback
- * @dev: network device
+ * velocity_free_td_ring - free td ring
+ * @vptr: velocity
  *
- * Callback from the network layer when the velocity is being
- * deactivated by the network layer
- */
-
-static int velocity_close(struct net_device *dev)
-{
-	struct velocity_info *vptr = netdev_priv(dev);
-
-	netif_stop_queue(dev);
-	velocity_shutdown(vptr);
+ * Free up the transmit ring for this particular velocity adapter.
+ * We free the ring contents but not the ring itself.
+ */
+static void velocity_free_td_ring(struct velocity_info *vptr)
+{
+	int i, j;

-	if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
-		velocity_get_ip(vptr);
-	if (dev->irq != 0)
-		free_irq(dev->irq, dev);
+	for (j = 0; j < vptr->tx.numq; j++) {
+		if (vptr->tx.infos[j] == NULL)
+			continue;
+		for (i = 0; i < vptr->options.numtx; i++)
+			velocity_free_td_ring_entry(vptr, j, i);

-	/* Power down the chip */
-	pci_set_power_state(vptr->pdev, PCI_D3hot);
+		kfree(vptr->tx.infos[j]);
+		vptr->tx.infos[j] = NULL;
+	}
+}

-	velocity_free_rings(vptr);

-	vptr->flags &= (~VELOCITY_FLAGS_OPENED);
-	return 0;
+static void velocity_free_rings(struct velocity_info *vptr)
+{
+	velocity_free_td_ring(vptr);
+	velocity_free_rd_ring(vptr);
+	velocity_free_dma_rings(vptr);
 }

 /**
- * velocity_xmit - transmit packet callback
- * @skb: buffer to transmit
- * @dev: network device
+ * velocity_error - handle error from controller
+ * @vptr: velocity
+ * @status: card status
+ *
+ * Process an error report from the hardware and attempt to recover
+ * the card itself. At the moment we cannot recover from some
+ * theoretically impossible errors but this could be fixed using
+ * the pci_device_failed logic to bounce the hardware
  *
- * Called by the networ layer to request a packet is queued to
- * the velocity. Returns zero on success.
  */
-
-static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
+static void velocity_error(struct velocity_info *vptr, int status)
 {
-	struct velocity_info *vptr = netdev_priv(dev);
-	int qnum = 0;
-	struct tx_desc *td_ptr;
-	struct velocity_td_info *tdinfo;
-	unsigned long flags;
-	int pktlen;
-	__le16 len;
-	int index;
-
-
-	if (skb_padto(skb, ETH_ZLEN))
-		goto out;
-	pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);

-	len = cpu_to_le16(pktlen);
+	if (status & ISR_TXSTLI) {
+		struct mac_regs __iomem *regs = vptr->mac_regs;

-	spin_lock_irqsave(&vptr->lock, flags);
+		printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0]));
+		BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
+		writew(TRDCSR_RUN, &regs->TDCSRClr);
+		netif_stop_queue(vptr->dev);

-	index = vptr->tx.curr[qnum];
-	td_ptr = &(vptr->tx.rings[qnum][index]);
-	tdinfo = &(vptr->tx.infos[qnum][index]);
+		/* FIXME: port over the pci_device_failed code and use it
+		   here */
+	}

-	td_ptr->tdesc1.TCR = TCR0_TIC;
-	td_ptr->td_buf[0].size &= ~TD_QUEUE;
+	if (status & ISR_SRCI) {
+		struct mac_regs __iomem *regs = vptr->mac_regs;
+		int linked;

-	/*
-	 * Map the linear network buffer into PCI space and
-	 * add it to the transmit ring.
-	 */
-	tdinfo->skb = skb;
-	tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
-	td_ptr->tdesc0.len = len;
-	td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
-	td_ptr->td_buf[0].pa_high = 0;
-	td_ptr->td_buf[0].size = len;
-	tdinfo->nskb_dma = 1;
+		if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
+			vptr->mii_status = check_connection_type(regs);

-	td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
+			/*
+			 * If it is a 3119, disable frame bursting in
+			 * halfduplex mode and enable it in fullduplex
+			 * mode
+			 */
+			if (vptr->rev_id < REV_ID_VT3216_A0) {
+				if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
+					BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
+				else
+					BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
+			}
+			/*
+			 * Only enable CD heart beat counter in 10HD mode
+			 */
+			if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10))
+				BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
+			else
+				BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
+		}
+		/*
+		 * Get link status from PHYSR0
+		 */
+		linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD;

-	if (vptr->vlgrp && vlan_tx_tag_present(skb)) {
-		td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb));
-		td_ptr->tdesc1.TCR |= TCR0_VETAG;
-	}
+		if (linked) {
+			vptr->mii_status &= ~VELOCITY_LINK_FAIL;
+			netif_carrier_on(vptr->dev);
+		} else {
+			vptr->mii_status |= VELOCITY_LINK_FAIL;
+			netif_carrier_off(vptr->dev);
+		}

-	/*
-	 * Handle hardware checksum
-	 */
-	if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM)
-	    && (skb->ip_summed == CHECKSUM_PARTIAL)) {
-		const struct iphdr *ip = ip_hdr(skb);
-		if (ip->protocol == IPPROTO_TCP)
-			td_ptr->tdesc1.TCR |= TCR0_TCPCK;
-		else if (ip->protocol == IPPROTO_UDP)
-			td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
-		td_ptr->tdesc1.TCR |= TCR0_IPCK;
-	}
-	{
+		velocity_print_link_status(vptr);
+		enable_flow_control_ability(vptr);

-		int prev = index - 1;
+		/*
+		 * Re-enable auto-polling because SRCI will disable
+		 * auto-polling
+		 */

-		if (prev < 0)
-			prev = vptr->options.numtx - 1;
-		td_ptr->tdesc0.len |= OWNED_BY_NIC;
-		vptr->tx.used[qnum]++;
-		vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
+		enable_mii_autopoll(regs);

-		if (AVAIL_TD(vptr, qnum) < 1)
-			netif_stop_queue(dev);
+		if (vptr->mii_status & VELOCITY_LINK_FAIL)
+			netif_stop_queue(vptr->dev);
+		else
+			netif_wake_queue(vptr->dev);

-		td_ptr = &(vptr->tx.rings[qnum][prev]);
-		td_ptr->td_buf[0].size |= TD_QUEUE;
-		mac_tx_queue_wake(vptr->mac_regs, qnum);
-	}
-	dev->trans_start = jiffies;
-	spin_unlock_irqrestore(&vptr->lock, flags);
-out:
-	return NETDEV_TX_OK;
+	};
+	if (status & ISR_MIBFI)
+		velocity_update_hw_mibs(vptr);
+	if (status & ISR_LSTEI)
+		mac_rx_queue_wake(vptr->mac_regs);
 }

 /**
- * velocity_intr - interrupt callback
- * @irq: interrupt number
- * @dev_instance: interrupting device
+ * velocity_tx_srv - transmit interrupt service
+ * @vptr: Velocity
+ * @status: adapter status (unused)
  *
- * Called whenever an interrupt is generated by the velocity
- * adapter IRQ line. We may not be the source of the interrupt
- * and need to identify initially if we are, and if not exit as
- * efficiently as possible.
+ * Scan the queues looking for transmitted packets that
+ * we can complete and clean up. Update any statistics as
+ * necessary.
  */
-
-static irqreturn_t velocity_intr(int irq, void *dev_instance)
+static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
 {
-	struct net_device *dev = dev_instance;
-	struct velocity_info *vptr = netdev_priv(dev);
-	u32 isr_status;
-	int max_count = 0;
+	struct tx_desc *td;
+	int qnum;
+	int full = 0;
+	int idx;
+	int works = 0;
+	struct velocity_td_info *tdinfo;
+	struct net_device_stats *stats = &vptr->dev->stats;

+	for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
+		for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
+			idx = (idx + 1) % vptr->options.numtx) {

-	spin_lock(&vptr->lock);
-	isr_status = mac_read_isr(vptr->mac_regs);
+			/*
+			 * Get Tx Descriptor
+			 */
+			td = &(vptr->tx.rings[qnum][idx]);
+			tdinfo = &(vptr->tx.infos[qnum][idx]);

-	/* Not us ? */
-	if (isr_status == 0) {
-		spin_unlock(&vptr->lock);
-		return IRQ_NONE;
-	}
+			if (td->tdesc0.len & OWNED_BY_NIC)
+				break;

-	mac_disable_int(vptr->mac_regs);
+			if ((works++ > 15))
+				break;
+
+			if (td->tdesc0.TSR & TSR0_TERR) {
+				stats->tx_errors++;
+				stats->tx_dropped++;
+				if (td->tdesc0.TSR & TSR0_CDH)
+					stats->tx_heartbeat_errors++;
+				if (td->tdesc0.TSR & TSR0_CRS)
+					stats->tx_carrier_errors++;
+				if (td->tdesc0.TSR & TSR0_ABT)
+					stats->tx_aborted_errors++;
+				if (td->tdesc0.TSR & TSR0_OWC)
+					stats->tx_window_errors++;
+			} else {
+				stats->tx_packets++;
+				stats->tx_bytes += tdinfo->skb->len;
+			}
+			velocity_free_tx_buf(vptr, tdinfo);
+			vptr->tx.used[qnum]--;
+		}
+		vptr->tx.tail[qnum] = idx;

+		if (AVAIL_TD(vptr, qnum) < 1)
+			full = 1;
+	}
 	/*
-	 * Keep processing the ISR until we have completed
-	 * processing and the isr_status becomes zero
+	 * Look to see if we should kick the transmit network
+	 * layer for more work.
 	 */
+	if (netif_queue_stopped(vptr->dev) && (full == 0)
+	    && (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
+		netif_wake_queue(vptr->dev);
+	}
+	return works;
+}
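
Note that velocity_tx_srv caps itself at 16 completions per call ("works++ > 15") and returns the count, which velocity_intr later accumulates and compares against options.int_works, so one busy queue cannot monopolise interrupt context. The budgeting pattern reduced to its core (the function name, callback and ring size here are illustrative only):

	/* Service at most `budget` completed entries and report how many
	   were handled, letting the caller decide whether to continue. */
	static int service_ring(int *tail, int head, int budget,
				void (*complete)(int idx))
	{
		int works = 0;

		while (*tail != head && works < budget) {
			complete(*tail);
			*tail = (*tail + 1) % 16;	/* assumed ring size */
			works++;
		}
		return works;
	}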
+
+/**
+ * velocity_rx_csum - checksum process
+ * @rd: receive packet descriptor
+ * @skb: network layer packet buffer
+ *
+ * Process the status bits for the received packet and determine
+ * if the checksum was computed and verified by the hardware
+ */
+static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
+{
+	skb->ip_summed = CHECKSUM_NONE;

-	while (isr_status != 0) {
-		mac_write_isr(vptr->mac_regs, isr_status);
-		if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
-			velocity_error(vptr, isr_status);
-		if (isr_status & (ISR_PRXI | ISR_PPRXI))
-			max_count += velocity_rx_srv(vptr, isr_status);
-		if (isr_status & (ISR_PTXI | ISR_PPTXI))
-			max_count += velocity_tx_srv(vptr, isr_status);
-		isr_status = mac_read_isr(vptr->mac_regs);
-		if (max_count > vptr->options.int_works) {
-			printk(KERN_WARNING "%s: excessive work at interrupt.\n",
-				dev->name);
-			max_count = 0;
+	if (rd->rdesc1.CSM & CSM_IPKT) {
+		if (rd->rdesc1.CSM & CSM_IPOK) {
+			if ((rd->rdesc1.CSM & CSM_TCPKT) ||
+			    (rd->rdesc1.CSM & CSM_UDPKT)) {
+				if (!(rd->rdesc1.CSM & CSM_TUPOK))
+					return;
+			}
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
 		}
 	}
-	spin_unlock(&vptr->lock);
-	mac_enable_int(vptr->mac_regs);
-	return IRQ_HANDLED;
-
 }

-
 /**
- * velocity_set_multi - filter list change callback
+ * velocity_rx_copy - in place Rx copy for small packets
+ * @rx_skb: network layer packet buffer candidate
+ * @pkt_size: received data size
+ * @rd: receive packet descriptor
  * @dev: network device
  *
- * Called by the network layer when the filter lists need to change
- * for a velocity adapter. Reload the CAMs with the new address
- * filter ruleset.
+ * Replace the current skb that is scheduled for Rx processing by a
+ * shorter, immediately allocated skb, if the received packet is small
+ * enough. This function returns a negative value if the received
+ * packet is too big or if memory is exhausted.
  */
-
-static void velocity_set_multi(struct net_device *dev)
+static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
+			    struct velocity_info *vptr)
 {
-	struct velocity_info *vptr = netdev_priv(dev);
-	struct mac_regs __iomem *regs = vptr->mac_regs;
-	u8 rx_mode;
-	int i;
-	struct dev_mc_list *mclist;
-
-	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
-		writel(0xffffffff, &regs->MARCAM[0]);
-		writel(0xffffffff, &regs->MARCAM[4]);
-		rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
-	} else if ((dev->mc_count > vptr->multicast_limit)
-		   || (dev->flags & IFF_ALLMULTI)) {
-		writel(0xffffffff, &regs->MARCAM[0]);
-		writel(0xffffffff, &regs->MARCAM[4]);
-		rx_mode = (RCR_AM | RCR_AB);
-	} else {
-		int offset = MCAM_SIZE - vptr->multicast_limit;
-		mac_get_cam_mask(regs, vptr->mCAMmask);
+	int ret = -1;
+	if (pkt_size < rx_copybreak) {
+		struct sk_buff *new_skb;

-		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; i++, mclist = mclist->next) {
-			mac_set_cam(regs, i + offset, mclist->dmi_addr);
-			vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
+		new_skb = netdev_alloc_skb(vptr->dev, pkt_size + 2);
+		if (new_skb) {
+			new_skb->ip_summed = rx_skb[0]->ip_summed;
+			skb_reserve(new_skb, 2);
+			skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
+			*rx_skb = new_skb;
+			ret = 0;
 		}

-		mac_set_cam_mask(regs, vptr->mCAMmask);
-		rx_mode = RCR_AM | RCR_AB | RCR_AP;
 	}
-	if (dev->mtu > 1500)
-		rx_mode |= RCR_AL;
-
-	BYTE_REG_BITS_ON(rx_mode, &regs->RCR);
-
+	return ret;
 }
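
The copybreak trade-off above: for frames shorter than rx_copybreak it is cheaper to memcpy into a small fresh skb and leave the original full-sized buffer in the ring than to hand the big buffer up the stack and allocate a replacement. The 2-byte skb_reserve keeps the IP header 4-byte aligned behind the 14-byte Ethernet header. A userspace model of the same decision:

	#include <stdlib.h>
	#include <string.h>

	/* Returns a compact copy (offset by 2 bytes, mimicking
	   skb_reserve()) for small frames, or NULL when the caller
	   should keep the original buffer. */
	static unsigned char *copybreak_copy(const unsigned char *frame,
					     int len, int copybreak)
	{
		unsigned char *copy;

		if (len >= copybreak)
			return NULL;
		copy = malloc(len + 2);
		if (!copy)
			return NULL;
		memcpy(copy + 2, frame, len);
		return copy;
	}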

 /**
- * velocity_get_status - statistics callback
- * @dev: network device
+ * velocity_iph_realign - IP header alignment
+ * @vptr: velocity we are handling
+ * @skb: network layer packet buffer
+ * @pkt_size: received data size
  *
- * Callback from the network layer to allow driver statistics
- * to be resynchronized with hardware collected state. In the
- * case of the velocity we need to pull the MIB counters from
- * the hardware into the counters before letting the network
- * layer display them.
+ * Align the IP header on a 2 byte boundary. This behavior can be
+ * configured by the user.
  */
-
-static struct net_device_stats *velocity_get_stats(struct net_device *dev)
+static inline void velocity_iph_realign(struct velocity_info *vptr,
+					struct sk_buff *skb, int pkt_size)
 {
-	struct velocity_info *vptr = netdev_priv(dev);
+	if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
+		memmove(skb->data + 2, skb->data, pkt_size);
+		skb_reserve(skb, 2);
+	}
+}

-	/* If the hardware is down, don't touch MII */
-	if (!netif_running(dev))
-		return &dev->stats;

-	spin_lock_irq(&vptr->lock);
-	velocity_update_hw_mibs(vptr);
-	spin_unlock_irq(&vptr->lock);
+/**
+ * velocity_receive_frame - received packet processor
+ * @vptr: velocity we are handling
+ * @idx: ring index
+ *
+ * A packet has arrived. We process the packet and if appropriate
+ * pass the frame up the network stack
+ */
+static int velocity_receive_frame(struct velocity_info *vptr, int idx)
+{
+	void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
+	struct net_device_stats *stats = &vptr->dev->stats;
+	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
+	struct rx_desc *rd = &(vptr->rx.ring[idx]);
+	int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
+	struct sk_buff *skb;

-	dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
-	dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
-	dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
+	if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
+		VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->dev->name);
+		stats->rx_length_errors++;
+		return -EINVAL;
+	}

-//  unsigned long   rx_dropped;      /* no space in linux buffers    */
-	dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
-	/* detailed rx_errors: */
-//  unsigned long   rx_length_errors;
-//  unsigned long   rx_over_errors;  /* receiver ring buff overflow  */
-	dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
-//  unsigned long   rx_frame_errors; /* recv'd frame alignment error */
-//  unsigned long   rx_fifo_errors;  /* recv'r fifo overrun          */
-//  unsigned long   rx_missed_errors;    /* receiver missed packet   */
+	if (rd->rdesc0.RSR & RSR_MAR)
+		stats->multicast++;

-	/* detailed tx_errors */
-//  unsigned long   tx_fifo_errors;
+	skb = rd_info->skb;

-	return &dev->stats;
-}
+	pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
+				    vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);

+	/*
+	 * Drop frame not meeting IEEE 802.3
+	 */

-/**
- * velocity_ioctl - ioctl entry point
- * @dev: network device
- * @rq: interface request ioctl
- * @cmd: command code
- *
- * Called when the user issues an ioctl request to the network
- * device in question. The velocity interface supports MII.
- */
+	if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
+		if (rd->rdesc0.RSR & RSR_RL) {
+			stats->rx_length_errors++;
+			return -EINVAL;
+		}
+	}

-static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
-	struct velocity_info *vptr = netdev_priv(dev);
-	int ret;
+	pci_action = pci_dma_sync_single_for_device;

-	/* If we are asked for information and the device is power
-	   saving then we need to bring the device back up to talk to it */
+	velocity_rx_csum(rd, skb);

-	if (!netif_running(dev))
-		pci_set_power_state(vptr->pdev, PCI_D0);
+	if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
+		velocity_iph_realign(vptr, skb, pkt_len);
+		pci_action = pci_unmap_single;
+		rd_info->skb = NULL;
+	}

-	switch (cmd) {
-	case SIOCGMIIPHY:	/* Get address of MII PHY in use. */
-	case SIOCGMIIREG:	/* Read MII PHY register. */
-	case SIOCSMIIREG:	/* Write to MII PHY register. */
-		ret = velocity_mii_ioctl(dev, rq, cmd);
-		break;
+	pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
+		   PCI_DMA_FROMDEVICE);

-	default:
-		ret = -EOPNOTSUPP;
-	}
-	if (!netif_running(dev))
-		pci_set_power_state(vptr->pdev, PCI_D3hot);
+	skb_put(skb, pkt_len - 4);
+	skb->protocol = eth_type_trans(skb, vptr->dev);
+
+	if (vptr->vlgrp && (rd->rdesc0.RSR & RSR_DETAG)) {
+		vlan_hwaccel_rx(skb, vptr->vlgrp,
+				swab16(le16_to_cpu(rd->rdesc1.PQTAG)));
+	} else
+		netif_rx(skb);

+	stats->rx_bytes += pkt_len;

-	return ret;
+	return 0;
 }

-/*
- * Definition for our device driver. The PCI layer interface
- * uses this to handle all our card discover and plugging
- */
-
-static struct pci_driver velocity_driver = {
-      .name	= VELOCITY_NAME,
-      .id_table	= velocity_id_table,
-      .probe	= velocity_found1,
-      .remove	= __devexit_p(velocity_remove1),
-#ifdef CONFIG_PM
-      .suspend	= velocity_suspend,
-      .resume	= velocity_resume,
-#endif
-};

 /**
- * velocity_init_module - load time function
+ * velocity_rx_srv - service RX interrupt
+ * @vptr: velocity
+ * @status: adapter status (unused)
  *
- * Called when the velocity module is loaded. The PCI driver
- * is registered with the PCI layer, and in turn will call
- * the probe functions for each velocity adapter installed
- * in the system.
+ * Walk the receive ring of the velocity adapter and remove
+ * any received packets from the receive queue. Hand the ring
+ * slots back to the adapter for reuse.
  */
-
-static int __init velocity_init_module(void)
+static int velocity_rx_srv(struct velocity_info *vptr, int status)
 {
-	int ret;
+	struct net_device_stats *stats = &vptr->dev->stats;
+	int rd_curr = vptr->rx.curr;
+	int works = 0;
+
+	do {
+		struct rx_desc *rd = vptr->rx.ring + rd_curr;
+
+		if (!vptr->rx.info[rd_curr].skb)
+			break;
+
+		if (rd->rdesc0.len & OWNED_BY_NIC)
+			break;
+
+		rmb();

-	velocity_register_notifier();
-	ret = pci_register_driver(&velocity_driver);
-	if (ret < 0)
-		velocity_unregister_notifier();
-	return ret;
-}
+		/*
+		 * Don't drop CE or RL error frame although RXOK is off
+		 */
+		if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
+			if (velocity_receive_frame(vptr, rd_curr) < 0)
+				stats->rx_dropped++;
+		} else {
+			if (rd->rdesc0.RSR & RSR_CRC)
+				stats->rx_crc_errors++;
+			if (rd->rdesc0.RSR & RSR_FAE)
+				stats->rx_frame_errors++;

-/**
- * velocity_cleanup - module unload
- *
- * When the velocity hardware is unloaded this function is called.
- * It will clean up the notifiers and the unregister the PCI
- * driver interface for this hardware. This in turn cleans up
- * all discovered interfaces before returning from the function
- */
+			stats->rx_dropped++;
+		}

-static void __exit velocity_cleanup_module(void)
-{
-	velocity_unregister_notifier();
-	pci_unregister_driver(&velocity_driver);
-}
+		rd->size |= RX_INTEN;

-module_init(velocity_init_module);
-module_exit(velocity_cleanup_module);
+		rd_curr++;
+		if (rd_curr >= vptr->options.numrx)
+			rd_curr = 0;
+	} while (++works <= 15);

+	vptr->rx.curr = rd_curr;

-/*
- * MII access , media link mode setting functions
- */
+	if ((works > 0) && (velocity_rx_refill(vptr) > 0))
+		velocity_give_many_rx_descs(vptr);
+
+	VAR_USED(stats);
+	return works;
+}

 /**
- * mii_init - set up MII
- * @vptr: velocity adapter
- * @mii_status: links tatus
+ * velocity_intr - interrupt callback
+ * @irq: interrupt number
+ * @dev_instance: interrupting device
  *
- * Set up the PHY for the current link state.
+ * Called whenever an interrupt is generated by the velocity
+ * adapter IRQ line. We may not be the source of the interrupt
+ * and need to identify initially if we are, and if not exit as
+ * efficiently as possible.
  */
-
-static void mii_init(struct velocity_info *vptr, u32 mii_status)
+static irqreturn_t velocity_intr(int irq, void *dev_instance)
 {
-	u16 BMCR;
+	struct net_device *dev = dev_instance;
+	struct velocity_info *vptr = netdev_priv(dev);
+	u32 isr_status;
+	int max_count = 0;

-	switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
-	case PHYID_CICADA_CS8201:
-		/*
-		 * Reset to hardware default
-		 */
-		MII_REG_BITS_OFF((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
-		/*
-		 * Turn on ECHODIS bit in NWay-forced full mode and turn it
-		 * off it in NWay-forced half mode for NWay-forced v.s.
-		 * legacy-forced issue.
-		 */
-		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
-			MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
-		else
-			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
-		/*
-		 * Turn on Link/Activity LED enable bit for CIS8201
-		 */
-		MII_REG_BITS_ON(PLED_LALBE, MII_REG_PLED, vptr->mac_regs);
-		break;
-	case PHYID_VT3216_32BIT:
-	case PHYID_VT3216_64BIT:
-		/*
-		 * Reset to hardware default
-		 */
-		MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
-		/*
-		 * Turn on ECHODIS bit in NWay-forced full mode and turn it
-		 * off it in NWay-forced half mode for NWay-forced v.s.
-		 * legacy-forced issue
-		 */
-		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
-			MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
-		else
-			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
-		break;

-	case PHYID_MARVELL_1000:
-	case PHYID_MARVELL_1000S:
-		/*
-		 * Assert CRS on Transmit
-		 */
-		MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
-		/*
-		 * Reset to hardware default
-		 */
-		MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
-		break;
-	default:
-		;
-	}
-	velocity_mii_read(vptr->mac_regs, MII_REG_BMCR, &BMCR);
-	if (BMCR & BMCR_ISO) {
-		BMCR &= ~BMCR_ISO;
-		velocity_mii_write(vptr->mac_regs, MII_REG_BMCR, BMCR);
+	spin_lock(&vptr->lock);
+	isr_status = mac_read_isr(vptr->mac_regs);
+
+	/* Not us ? */
+	if (isr_status == 0) {
+		spin_unlock(&vptr->lock);
+		return IRQ_NONE;
 	}
-}

-/**
- * safe_disable_mii_autopoll - autopoll off
- * @regs: velocity registers
- *
- * Turn off the autopoll and wait for it to disable on the chip
- */
+	mac_disable_int(vptr->mac_regs);

-static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs)
-{
-	u16 ww;
+	/*
+	 * Keep processing the ISR until we have completed
+	 * processing and the isr_status becomes zero
+	 */

-	/* turn off MAUTO */
-	writeb(0, &regs->MIICR);
-	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
-		udelay(1);
-		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
-			break;
+	while (isr_status != 0) {
+		mac_write_isr(vptr->mac_regs, isr_status);
+		if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
+			velocity_error(vptr, isr_status);
+		if (isr_status & (ISR_PRXI | ISR_PPRXI))
+			max_count += velocity_rx_srv(vptr, isr_status);
+		if (isr_status & (ISR_PTXI | ISR_PPTXI))
+			max_count += velocity_tx_srv(vptr, isr_status);
+		isr_status = mac_read_isr(vptr->mac_regs);
+		if (max_count > vptr->options.int_works) {
+			printk(KERN_WARNING "%s: excessive work at interrupt.\n",
+				dev->name);
+			max_count = 0;
+		}
 	}
+	spin_unlock(&vptr->lock);
+	mac_enable_int(vptr->mac_regs);
+	return IRQ_HANDLED;
+
 }

 /**
- * enable_mii_autopoll - turn on autopolling
- * @regs: velocity registers
+ * velocity_open - interface activation callback
+ * @dev: network layer device to open
  *
- * Enable the MII link status autopoll feature on the Velocity
- * hardware. Wait for it to enable.
+ * Called when the network layer brings the interface up. Returns
+ * a negative posix error code on failure, or zero on success.
+ *
+ * All the ring allocation and set up is done on open for this
+ * adapter to minimise memory usage when inactive
  */
-
-static void enable_mii_autopoll(struct mac_regs __iomem *regs)
+static int velocity_open(struct net_device *dev)
 {
-	int ii;
+	struct velocity_info *vptr = netdev_priv(dev);
+	int ret;

-	writeb(0, &(regs->MIICR));
-	writeb(MIIADR_SWMPL, &regs->MIIADR);
+	ret = velocity_init_rings(vptr, dev->mtu);
+	if (ret < 0)
+		goto out;

-	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
-		udelay(1);
-		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
-			break;
-	}
+	/* Ensure chip is running */
+	pci_set_power_state(vptr->pdev, PCI_D0);

-	writeb(MIICR_MAUTO, &regs->MIICR);
+	velocity_give_many_rx_descs(vptr);

-	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
-		udelay(1);
-		if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
-			break;
+	velocity_init_registers(vptr, VELOCITY_INIT_COLD);
+
+	ret = request_irq(vptr->pdev->irq, &velocity_intr, IRQF_SHARED,
+			  dev->name, dev);
+	if (ret < 0) {
+		/* Power down the chip */
+		pci_set_power_state(vptr->pdev, PCI_D3hot);
+		velocity_free_rings(vptr);
+		goto out;
 	}

+	mac_enable_int(vptr->mac_regs);
+	netif_start_queue(dev);
+	vptr->flags |= VELOCITY_FLAGS_OPENED;
+out:
+	return ret;
 }

 /**
- * velocity_mii_read - read MII data
- * @regs: velocity registers
- * @index: MII register index
- * @data: buffer for received data
+ * velocity_shutdown - shut down the chip
+ * @vptr: velocity to deactivate
  *
- * Perform a single read of an MII 16bit register. Returns zero
- * on success or -ETIMEDOUT if the PHY did not respond.
+ * Shuts down the internal operations of the velocity and
+ * disables interrupts, autopolling, transmit and receive
  */
-
-static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
+static void velocity_shutdown(struct velocity_info *vptr)
 {
-	u16 ww;
-
-	/*
-	 * Disable MIICR_MAUTO, so that mii addr can be set normally
-	 */
+	struct mac_regs __iomem *regs = vptr->mac_regs;
+	mac_disable_int(regs);
+	writel(CR0_STOP, &regs->CR0Set);
+	writew(0xFFFF, &regs->TDCSRClr);
+	writeb(0xFF, &regs->RDCSRClr);
 	safe_disable_mii_autopoll(regs);
+	mac_clear_isr(regs);
+}

-	writeb(index, &regs->MIIADR);
+/**
+ * velocity_change_mtu - MTU change callback
+ * @dev: network device
+ * @new_mtu: desired MTU
+ *
+ * Handle requests from the networking layer for MTU change on
+ * this interface. It gets called on a change by the network layer.
+ * Return zero for success or negative posix error code.
+ */
+static int velocity_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct velocity_info *vptr = netdev_priv(dev);
+	int ret = 0;

-	BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);
+	if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) {
+		VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
+				vptr->dev->name);
+		ret = -EINVAL;
+		goto out_0;
+	}

-	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
-		if (!(readb(&regs->MIICR) & MIICR_RCMD))
-			break;
+	if (!netif_running(dev)) {
+		dev->mtu = new_mtu;
+		goto out_0;
 	}

-	*data = readw(&regs->MIIDATA);
+	if (dev->mtu != new_mtu) {
+		struct velocity_info *tmp_vptr;
+		unsigned long flags;
+		struct rx_info rx;
+		struct tx_info tx;

-	enable_mii_autopoll(regs);
-	if (ww == W_MAX_TIMEOUT)
-		return -ETIMEDOUT;
-	return 0;
-}
+		tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
+		if (!tmp_vptr) {
+			ret = -ENOMEM;
+			goto out_0;
+		}

-/**
- * velocity_mii_write - write MII data
- * @regs: velocity registers
- * @index: MII register index
- * @data: 16bit data for the MII register
- *
- * Perform a single write to an MII 16bit register. Returns zero
- * on success or -ETIMEDOUT if the PHY did not respond.
- */
+		tmp_vptr->dev = dev;
+		tmp_vptr->pdev = vptr->pdev;
+		tmp_vptr->options = vptr->options;
+		tmp_vptr->tx.numq = vptr->tx.numq;

-static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
-{
-	u16 ww;
+		ret = velocity_init_rings(tmp_vptr, new_mtu);
+		if (ret < 0)
+			goto out_free_tmp_vptr_1;

-	/*
-	 * Disable MIICR_MAUTO, so that mii addr can be set normally
-	 */
-	safe_disable_mii_autopoll(regs);
+		spin_lock_irqsave(&vptr->lock, flags);

-	/* MII reg offset */
-	writeb(mii_addr, &regs->MIIADR);
-	/* set MII data */
-	writew(data, &regs->MIIDATA);
+		netif_stop_queue(dev);
+		velocity_shutdown(vptr);

-	/* turn on MIICR_WCMD */
-	BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);
+		rx = vptr->rx;
+		tx = vptr->tx;

-	/* W_MAX_TIMEOUT is the timeout period */
-	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
-		udelay(5);
-		if (!(readb(&regs->MIICR) & MIICR_WCMD))
-			break;
-	}
-	enable_mii_autopoll(regs);
+		vptr->rx = tmp_vptr->rx;
+		vptr->tx = tmp_vptr->tx;

-	if (ww == W_MAX_TIMEOUT)
-		return -ETIMEDOUT;
-	return 0;
-}
+		tmp_vptr->rx = rx;
+		tmp_vptr->tx = tx;

-/**
- * velocity_get_opt_media_mode - get media selection
- * @vptr: velocity adapter
- *
- * Get the media mode stored in EEPROM or module options and load
- * mii_status accordingly. The requested link state information
- * is also returned.
- */
+		dev->mtu = new_mtu;

-static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
-{
-	u32 status = 0;
+		velocity_give_many_rx_descs(vptr);

-	switch (vptr->options.spd_dpx) {
-	case SPD_DPX_AUTO:
-		status = VELOCITY_AUTONEG_ENABLE;
-		break;
-	case SPD_DPX_100_FULL:
-		status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
-		break;
-	case SPD_DPX_10_FULL:
-		status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
-		break;
-	case SPD_DPX_100_HALF:
-		status = VELOCITY_SPEED_100;
-		break;
-	case SPD_DPX_10_HALF:
-		status = VELOCITY_SPEED_10;
-		break;
-	}
-	vptr->mii_status = status;
-	return status;
-}
+		velocity_init_registers(vptr, VELOCITY_INIT_COLD);

-/**
- * mii_set_auto_on - autonegotiate on
- * @vptr: velocity
- *
- * Enable autonegotation on this interface
- */
+		mac_enable_int(vptr->mac_regs);
+		netif_start_queue(dev);

-static void mii_set_auto_on(struct velocity_info *vptr)
-{
-	if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs))
-		MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
-	else
-		MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs);
-}
+		spin_unlock_irqrestore(&vptr->lock, flags);

+		velocity_free_rings(tmp_vptr);

-/*
-static void mii_set_auto_off(struct velocity_info *vptr)
-{
-	MII_REG_BITS_OFF(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs);
+out_free_tmp_vptr_1:
+		kfree(tmp_vptr);
+	}
+out_0:
+	return ret;
 }
-*/
|
|
|
|
|
|
/**
|
|
|
- * set_mii_flow_control - flow control setup
|
|
|
- * @vptr: velocity interface
|
|
|
+ * velocity_mii_ioctl - MII ioctl handler
|
|
|
+ * @dev: network device
|
|
|
+ * @ifr: the ifreq block for the ioctl
|
|
|
+ * @cmd: the command
|
|
|
*
|
|
|
- * Set up the flow control on this interface according to
|
|
|
- * the supplied user/eeprom options.
|
|
|
+ * Process MII requests made via ioctl from the network layer. These
|
|
|
+ * are used by tools like kudzu to interrogate the link state of the
|
|
|
+ * hardware
|
|
|
*/
|
|
|
-
|
|
|
-static void set_mii_flow_control(struct velocity_info *vptr)
|
|
|
+static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
|
|
|
{
|
|
|
- /*Enable or Disable PAUSE in ANAR */
|
|
|
- switch (vptr->options.flow_cntl) {
|
|
|
- case FLOW_CNTL_TX:
|
|
|
- MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
|
|
|
- MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
|
|
|
- break;
|
|
|
+ struct velocity_info *vptr = netdev_priv(dev);
|
|
|
+ struct mac_regs __iomem *regs = vptr->mac_regs;
|
|
|
+ unsigned long flags;
|
|
|
+ struct mii_ioctl_data *miidata = if_mii(ifr);
|
|
|
+ int err;
|
|
|
|
|
|
- case FLOW_CNTL_RX:
|
|
|
- MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
|
|
|
- MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
|
|
|
+ switch (cmd) {
|
|
|
+ case SIOCGMIIPHY:
|
|
|
+ miidata->phy_id = readb(®s->MIIADR) & 0x1f;
|
|
|
break;
|
|
|
-
|
|
|
- case FLOW_CNTL_TX_RX:
|
|
|
- MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
|
|
|
- MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
|
|
|
+ case SIOCGMIIREG:
|
|
|
+ if (!capable(CAP_NET_ADMIN))
|
|
|
+ return -EPERM;
|
|
|
+ if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
|
|
|
+ return -ETIMEDOUT;
|
|
|
break;
|
|
|
-
|
|
|
- case FLOW_CNTL_DISABLE:
|
|
|
- MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
|
|
|
- MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
|
|
|
+ case SIOCSMIIREG:
|
|
|
+ if (!capable(CAP_NET_ADMIN))
|
|
|
+ return -EPERM;
|
|
|
+ spin_lock_irqsave(&vptr->lock, flags);
|
|
|
+ err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
|
|
|
+ spin_unlock_irqrestore(&vptr->lock, flags);
|
|
|
+ check_connection_type(vptr->mac_regs);
|
|
|
+ if (err)
|
|
|
+ return err;
|
|
|
break;
|
|
|
default:
|
|
|
- break;
|
|
|
+ return -EOPNOTSUPP;
|
|
|
}
|
|
|
+ return 0;
|
|
|
}
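Since velocity_mii_ioctl() exists purely for user space, a short sketch of how a diagnostic tool would reach it may help; the socket/ioctl plumbing below is standard, but treat it as an illustration rather than part of the patch (the register reads need CAP_NET_ADMIN, and sock is an ordinary AF_INET datagram socket):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/mii.h>
	#include <linux/sockios.h>

	/* Read the PHY's Basic Mode Status Register via SIOCGMIIPHY/SIOCGMIIREG. */
	static int read_bmsr(int sock, const char *ifname)
	{
		struct ifreq ifr;
		struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

		if (ioctl(sock, SIOCGMIIPHY, &ifr) < 0)	/* fills mii->phy_id */
			return -1;
		mii->reg_num = MII_BMSR;
		if (ioctl(sock, SIOCGMIIREG, &ifr) < 0)	/* -EPERM without CAP_NET_ADMIN */
			return -1;
		return mii->val_out;			/* test BMSR_LSTATUS for link */
	}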
|
|
|
|
|
|
+
|
|
|
/**
|
|
|
- * velocity_set_media_mode - set media mode
|
|
|
- * @mii_status: old MII link state
|
|
|
+ * velocity_ioctl - ioctl entry point
|
|
|
+ * @dev: network device
|
|
|
+ * @rq: interface request ioctl
|
|
|
+ * @cmd: command code
|
|
|
*
|
|
|
- * Check the media link state and configure the flow control
|
|
|
- * PHY and also velocity hardware setup accordingly. In particular
|
|
|
- * we need to set up CD polling and frame bursting.
|
|
|
+ * Called when the user issues an ioctl request to the network
|
|
|
+ * device in question. The velocity interface supports MII.
|
|
|
*/
|
|
|
+static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
|
|
|
+{
|
|
|
+ struct velocity_info *vptr = netdev_priv(dev);
|
|
|
+ int ret;
|
|
|
|
|
|
-static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
|
|
|
-{
|
|
|
- u32 curr_status;
|
|
|
- struct mac_regs __iomem *regs = vptr->mac_regs;
|
|
|
-
|
|
|
- vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
|
|
|
- curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);
|
|
|
-
|
|
|
- /* Set mii link status */
|
|
|
- set_mii_flow_control(vptr);
|
|
|
-
|
|
|
- /*
|
|
|
- Check if new status is consisent with current status
|
|
|
- if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE)
|
|
|
- || (mii_status==curr_status)) {
|
|
|
- vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
|
|
|
- vptr->mii_status=check_connection_type(vptr->mac_regs);
|
|
|
- VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n");
|
|
|
- return 0;
|
|
|
- }
|
|
|
- */
|
|
|
-
|
|
|
- if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
|
|
|
- MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);
|
|
|
-
|
|
|
- /*
|
|
|
- * If connection type is AUTO
|
|
|
- */
|
|
|
- if (mii_status & VELOCITY_AUTONEG_ENABLE) {
|
|
|
- VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity is AUTO mode\n");
|
|
|
- /* clear force MAC mode bit */
|
|
|
- BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, ®s->CHIPGCR);
|
|
|
- /* set duplex mode of MAC according to duplex mode of MII */
|
|
|
- MII_REG_BITS_ON(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10, MII_REG_ANAR, vptr->mac_regs);
|
|
|
- MII_REG_BITS_ON(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
|
|
|
- MII_REG_BITS_ON(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs);
|
|
|
-
|
|
|
- /* enable AUTO-NEGO mode */
|
|
|
- mii_set_auto_on(vptr);
|
|
|
- } else {
|
|
|
- u16 ANAR;
|
|
|
- u8 CHIPGCR;
|
|
|
-
|
|
|
- /*
|
|
|
- * 1. if it's 3119, disable frame bursting in halfduplex mode
|
|
|
- * and enable it in fullduplex mode
|
|
|
- * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
|
|
|
- * 3. only enable CD heart beat counter in 10HD mode
|
|
|
- */
|
|
|
-
|
|
|
- /* set force MAC mode bit */
|
|
|
- BYTE_REG_BITS_ON(CHIPGCR_FCMODE, ®s->CHIPGCR);
|
|
|
+ /* If we are asked for information and the device is in a power-
|
|
|
+ saving state then we need to bring the device back up to talk to it */
|
|
|
|
|
|
- CHIPGCR = readb(®s->CHIPGCR);
|
|
|
- CHIPGCR &= ~CHIPGCR_FCGMII;
|
|
|
+ if (!netif_running(dev))
|
|
|
+ pci_set_power_state(vptr->pdev, PCI_D0);
|
|
|
|
|
|
- if (mii_status & VELOCITY_DUPLEX_FULL) {
|
|
|
- CHIPGCR |= CHIPGCR_FCFDX;
|
|
|
- writeb(CHIPGCR, ®s->CHIPGCR);
|
|
|
- VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced full mode\n");
|
|
|
- if (vptr->rev_id < REV_ID_VT3216_A0)
|
|
|
- BYTE_REG_BITS_OFF(TCR_TB2BDIS, ®s->TCR);
|
|
|
- } else {
|
|
|
- CHIPGCR &= ~CHIPGCR_FCFDX;
|
|
|
- VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced half mode\n");
|
|
|
- writeb(CHIPGCR, ®s->CHIPGCR);
|
|
|
- if (vptr->rev_id < REV_ID_VT3216_A0)
|
|
|
- BYTE_REG_BITS_ON(TCR_TB2BDIS, ®s->TCR);
|
|
|
- }
|
|
|
+ switch (cmd) {
|
|
|
+ case SIOCGMIIPHY: /* Get address of MII PHY in use. */
|
|
|
+ case SIOCGMIIREG: /* Read MII PHY register. */
|
|
|
+ case SIOCSMIIREG: /* Write to MII PHY register. */
|
|
|
+ ret = velocity_mii_ioctl(dev, rq, cmd);
|
|
|
+ break;
|
|
|
|
|
|
- MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
|
|
|
+ default:
|
|
|
+ ret = -EOPNOTSUPP;
|
|
|
+ }
|
|
|
+ if (!netif_running(dev))
|
|
|
+ pci_set_power_state(vptr->pdev, PCI_D3hot);
|
|
|
|
|
|
- if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
|
|
|
- BYTE_REG_BITS_OFF(TESTCFG_HBDIS, ®s->TESTCFG);
|
|
|
- else
|
|
|
- BYTE_REG_BITS_ON(TESTCFG_HBDIS, ®s->TESTCFG);
|
|
|
|
|
|
- /* MII_REG_BITS_OFF(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); */
|
|
|
- velocity_mii_read(vptr->mac_regs, MII_REG_ANAR, &ANAR);
|
|
|
- ANAR &= (~(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10));
|
|
|
- if (mii_status & VELOCITY_SPEED_100) {
|
|
|
- if (mii_status & VELOCITY_DUPLEX_FULL)
|
|
|
- ANAR |= ANAR_TXFD;
|
|
|
- else
|
|
|
- ANAR |= ANAR_TX;
|
|
|
- } else {
|
|
|
- if (mii_status & VELOCITY_DUPLEX_FULL)
|
|
|
- ANAR |= ANAR_10FD;
|
|
|
- else
|
|
|
- ANAR |= ANAR_10;
|
|
|
- }
|
|
|
- velocity_mii_write(vptr->mac_regs, MII_REG_ANAR, ANAR);
|
|
|
- /* enable AUTO-NEGO mode */
|
|
|
- mii_set_auto_on(vptr);
|
|
|
- /* MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); */
|
|
|
- }
|
|
|
- /* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
|
|
|
- /* vptr->mii_status=check_connection_type(vptr->mac_regs); */
|
|
|
- return VELOCITY_LINK_CHANGE;
|
|
|
+ return ret;
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
- * mii_check_media_mode - check media state
|
|
|
- * @regs: velocity registers
|
|
|
+ * velocity_get_stats - statistics callback
|
|
|
+ * @dev: network device
|
|
|
*
|
|
|
- * Check the current MII status and determine the link status
|
|
|
- * accordingly
|
|
|
+ * Callback from the network layer to allow driver statistics
|
|
|
+ * to be resynchronized with hardware-collected state. In the
|
|
|
+ * case of the velocity we need to pull the MIB counters from
|
|
|
+ * the hardware into the counters before letting the network
|
|
|
+ * layer display them.
|
|
|
*/
|
|
|
-
|
|
|
-static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
|
|
|
+static struct net_device_stats *velocity_get_stats(struct net_device *dev)
|
|
|
{
|
|
|
- u32 status = 0;
|
|
|
- u16 ANAR;
|
|
|
+ struct velocity_info *vptr = netdev_priv(dev);
|
|
|
|
|
|
- if (!MII_REG_BITS_IS_ON(BMSR_LNK, MII_REG_BMSR, regs))
|
|
|
- status |= VELOCITY_LINK_FAIL;
|
|
|
+ /* If the hardware is down, don't touch MII */
|
|
|
+ if (!netif_running(dev))
|
|
|
+ return &dev->stats;
|
|
|
|
|
|
- if (MII_REG_BITS_IS_ON(G1000CR_1000FD, MII_REG_G1000CR, regs))
|
|
|
- status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
|
|
|
- else if (MII_REG_BITS_IS_ON(G1000CR_1000, MII_REG_G1000CR, regs))
|
|
|
- status |= (VELOCITY_SPEED_1000);
|
|
|
- else {
|
|
|
- velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
|
|
|
- if (ANAR & ANAR_TXFD)
|
|
|
- status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
|
|
|
- else if (ANAR & ANAR_TX)
|
|
|
- status |= VELOCITY_SPEED_100;
|
|
|
- else if (ANAR & ANAR_10FD)
|
|
|
- status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
|
|
|
- else
|
|
|
- status |= (VELOCITY_SPEED_10);
|
|
|
- }
|
|
|
+ spin_lock_irq(&vptr->lock);
|
|
|
+ velocity_update_hw_mibs(vptr);
|
|
|
+ spin_unlock_irq(&vptr->lock);
|
|
|
|
|
|
- if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
|
|
|
- velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
|
|
|
- if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
|
|
|
- == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
|
|
|
- if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
|
|
|
- status |= VELOCITY_AUTONEG_ENABLE;
|
|
|
- }
|
|
|
- }
|
|
|
+ dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
|
|
|
+ dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
|
|
|
+ dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
|
|
|
|
|
|
- return status;
|
|
|
+// unsigned long rx_dropped; /* no space in linux buffers */
|
|
|
+ dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
|
|
|
+ /* detailed rx_errors: */
|
|
|
+// unsigned long rx_length_errors;
|
|
|
+// unsigned long rx_over_errors; /* receiver ring buff overflow */
|
|
|
+ dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
|
|
|
+// unsigned long rx_frame_errors; /* recv'd frame alignment error */
|
|
|
+// unsigned long rx_fifo_errors; /* recv'r fifo overrun */
|
|
|
+// unsigned long rx_missed_errors; /* receiver missed packet */
|
|
|
+
|
|
|
+ /* detailed tx_errors */
|
|
|
+// unsigned long tx_fifo_errors;
|
|
|
+
|
|
|
+ return &dev->stats;
|
|
|
}
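velocity_update_hw_mibs() itself is untouched by this patch; for context, it is assumed to latch the on-chip MIB block and accumulate each counter into vptr->mib_counter[], roughly as sketched below (register and macro names per the driver's headers; a sketch, not the literal implementation):

	static void velocity_update_hw_mibs(struct velocity_info *vptr)
	{
		struct mac_regs __iomem *regs = vptr->mac_regs;
		u32 tmp;
		int i;

		/* Rewind the hardware MIB read pointer... */
		BYTE_REG_BITS_ON(MIBCR_MPTRINI, &regs->MIBCR);

		/* ...then each MIBData read returns the next 24-bit counter;
		 * accumulate so the totals survive chip resets. */
		for (i = 0; i < HW_MIB_SIZE; i++) {
			tmp = readl(&regs->MIBData) & 0x00FFFFFFUL;
			vptr->mib_counter[i] += tmp;
		}
	}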
|
|
|
|
|
|
-static u32 check_connection_type(struct mac_regs __iomem *regs)
|
|
|
+/**
|
|
|
+ * velocity_close - close adapter callback
|
|
|
+ * @dev: network device
|
|
|
+ *
|
|
|
+ * Callback from the network layer when the velocity interface is
|
|
|
+ * being deactivated and shut down
|
|
|
+ */
|
|
|
+static int velocity_close(struct net_device *dev)
|
|
|
{
|
|
|
- u32 status = 0;
|
|
|
- u8 PHYSR0;
|
|
|
- u16 ANAR;
|
|
|
- PHYSR0 = readb(®s->PHYSR0);
|
|
|
+ struct velocity_info *vptr = netdev_priv(dev);
|
|
|
|
|
|
- /*
|
|
|
- if (!(PHYSR0 & PHYSR0_LINKGD))
|
|
|
- status|=VELOCITY_LINK_FAIL;
|
|
|
- */
|
|
|
+ netif_stop_queue(dev);
|
|
|
+ velocity_shutdown(vptr);
|
|
|
|
|
|
- if (PHYSR0 & PHYSR0_FDPX)
|
|
|
- status |= VELOCITY_DUPLEX_FULL;
|
|
|
+ if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
|
|
|
+ velocity_get_ip(vptr);
|
|
|
+ if (dev->irq != 0)
|
|
|
+ free_irq(dev->irq, dev);
|
|
|
|
|
|
- if (PHYSR0 & PHYSR0_SPDG)
|
|
|
- status |= VELOCITY_SPEED_1000;
|
|
|
- else if (PHYSR0 & PHYSR0_SPD10)
|
|
|
- status |= VELOCITY_SPEED_10;
|
|
|
- else
|
|
|
- status |= VELOCITY_SPEED_100;
|
|
|
+ /* Power down the chip */
|
|
|
+ pci_set_power_state(vptr->pdev, PCI_D3hot);
|
|
|
|
|
|
- if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
|
|
|
- velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
|
|
|
- if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
|
|
|
- == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
|
|
|
- if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
|
|
|
- status |= VELOCITY_AUTONEG_ENABLE;
|
|
|
- }
|
|
|
- }
|
|
|
+ velocity_free_rings(vptr);
|
|
|
|
|
|
- return status;
|
|
|
+ vptr->flags &= (~VELOCITY_FLAGS_OPENED);
|
|
|
+ return 0;
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
- * enable_flow_control_ability - flow control
|
|
|
- * @vptr: veloity to configure
|
|
|
+ * velocity_xmit - transmit packet callback
|
|
|
+ * @skb: buffer to transmit
|
|
|
+ * @dev: network device
|
|
|
*
|
|
|
- * Set up flow control according to the flow control options
|
|
|
- * determined by the eeprom/configuration.
|
|
|
+ * Called by the network layer to request that a packet be queued to
|
|
|
+ * the velocity. Returns zero on success.
|
|
|
*/
|
|
|
-
|
|
|
-static void enable_flow_control_ability(struct velocity_info *vptr)
|
|
|
+static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
|
{
|
|
|
+ struct velocity_info *vptr = netdev_priv(dev);
|
|
|
+ int qnum = 0;
|
|
|
+ struct tx_desc *td_ptr;
|
|
|
+ struct velocity_td_info *tdinfo;
|
|
|
+ unsigned long flags;
|
|
|
+ int pktlen;
|
|
|
+ __le16 len;
|
|
|
+ int index;
|
|
|
|
|
|
- struct mac_regs __iomem *regs = vptr->mac_regs;
|
|
|
+ if (skb_padto(skb, ETH_ZLEN))
|
|
|
+ goto out;
|
|
|
+ pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
|
|
|
|
|
|
- switch (vptr->options.flow_cntl) {
|
|
|
+ len = cpu_to_le16(pktlen);
|
|
|
|
|
|
- case FLOW_CNTL_DEFAULT:
|
|
|
- if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, ®s->PHYSR0))
|
|
|
- writel(CR0_FDXRFCEN, ®s->CR0Set);
|
|
|
- else
|
|
|
- writel(CR0_FDXRFCEN, ®s->CR0Clr);
|
|
|
+ spin_lock_irqsave(&vptr->lock, flags);
|
|
|
|
|
|
- if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, ®s->PHYSR0))
|
|
|
- writel(CR0_FDXTFCEN, ®s->CR0Set);
|
|
|
- else
|
|
|
- writel(CR0_FDXTFCEN, ®s->CR0Clr);
|
|
|
- break;
|
|
|
+ index = vptr->tx.curr[qnum];
|
|
|
+ td_ptr = &(vptr->tx.rings[qnum][index]);
|
|
|
+ tdinfo = &(vptr->tx.infos[qnum][index]);
|
|
|
|
|
|
- case FLOW_CNTL_TX:
|
|
|
- writel(CR0_FDXTFCEN, ®s->CR0Set);
|
|
|
- writel(CR0_FDXRFCEN, ®s->CR0Clr);
|
|
|
- break;
|
|
|
+ td_ptr->tdesc1.TCR = TCR0_TIC;
|
|
|
+ td_ptr->td_buf[0].size &= ~TD_QUEUE;
|
|
|
|
|
|
- case FLOW_CNTL_RX:
|
|
|
- writel(CR0_FDXRFCEN, ®s->CR0Set);
|
|
|
- writel(CR0_FDXTFCEN, ®s->CR0Clr);
|
|
|
- break;
|
|
|
+ /*
|
|
|
+ * Map the linear network buffer into PCI space and
|
|
|
+ * add it to the transmit ring.
|
|
|
+ */
|
|
|
+ tdinfo->skb = skb;
|
|
|
+ tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
|
|
|
+ td_ptr->tdesc0.len = len;
|
|
|
+ td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
|
|
|
+ td_ptr->td_buf[0].pa_high = 0;
|
|
|
+ td_ptr->td_buf[0].size = len;
|
|
|
+ tdinfo->nskb_dma = 1;
|
|
|
+
|
|
|
+ td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
|
|
|
+
|
|
|
+ if (vptr->vlgrp && vlan_tx_tag_present(skb)) {
|
|
|
+ td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb));
|
|
|
+ td_ptr->tdesc1.TCR |= TCR0_VETAG;
|
|
|
+ }
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Handle hardware checksum
|
|
|
+ */
|
|
|
+ if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM)
|
|
|
+ && (skb->ip_summed == CHECKSUM_PARTIAL)) {
|
|
|
+ const struct iphdr *ip = ip_hdr(skb);
|
|
|
+ if (ip->protocol == IPPROTO_TCP)
|
|
|
+ td_ptr->tdesc1.TCR |= TCR0_TCPCK;
|
|
|
+ else if (ip->protocol == IPPROTO_UDP)
|
|
|
+ td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
|
|
|
+ td_ptr->tdesc1.TCR |= TCR0_IPCK;
|
|
|
+ }
|
|
|
+ {
|
|
|
|
|
|
- case FLOW_CNTL_TX_RX:
|
|
|
- writel(CR0_FDXTFCEN, ®s->CR0Set);
|
|
|
- writel(CR0_FDXRFCEN, ®s->CR0Set);
|
|
|
- break;
|
|
|
+ int prev = index - 1;
|
|
|
|
|
|
- case FLOW_CNTL_DISABLE:
|
|
|
- writel(CR0_FDXRFCEN, ®s->CR0Clr);
|
|
|
- writel(CR0_FDXTFCEN, ®s->CR0Clr);
|
|
|
- break;
|
|
|
+ if (prev < 0)
|
|
|
+ prev = vptr->options.numtx - 1;
|
|
|
+ td_ptr->tdesc0.len |= OWNED_BY_NIC;
|
|
|
+ vptr->tx.used[qnum]++;
|
|
|
+ vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
|
|
|
|
|
|
- default:
|
|
|
- break;
|
|
|
- }
|
|
|
+ if (AVAIL_TD(vptr, qnum) < 1)
|
|
|
+ netif_stop_queue(dev);
|
|
|
|
|
|
+ td_ptr = &(vptr->tx.rings[qnum][prev]);
|
|
|
+ td_ptr->td_buf[0].size |= TD_QUEUE;
|
|
|
+ mac_tx_queue_wake(vptr->mac_regs, qnum);
|
|
|
+ }
|
|
|
+ dev->trans_start = jiffies;
|
|
|
+ spin_unlock_irqrestore(&vptr->lock, flags);
|
|
|
+out:
|
|
|
+ return NETDEV_TX_OK;
|
|
|
}
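The index handling in velocity_xmit() is the only subtle part: the new descriptor is marked OWNED_BY_NIC, but TD_QUEUE is set on the previous descriptor so the DMA engine chains into the fresh one when the queue is woken. The wraparound arithmetic, pulled out as hypothetical helpers for clarity (not in the patch):

	/* Hypothetical helpers showing the ring wraparound used above. */
	static inline int td_ring_next(int index, int ring_size)
	{
		return (index + 1) % ring_size;		/* vptr->tx.curr advance */
	}

	static inline int td_ring_prev(int index, int ring_size)
	{
		return index ? index - 1 : ring_size - 1;	/* descriptor to chain from */
	}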
|
|
|
|
|
|
|
|
|
+static const struct net_device_ops velocity_netdev_ops = {
|
|
|
+ .ndo_open = velocity_open,
|
|
|
+ .ndo_stop = velocity_close,
|
|
|
+ .ndo_start_xmit = velocity_xmit,
|
|
|
+ .ndo_get_stats = velocity_get_stats,
|
|
|
+ .ndo_validate_addr = eth_validate_addr,
|
|
|
+ .ndo_set_mac_address = eth_mac_addr,
|
|
|
+ .ndo_set_multicast_list = velocity_set_multi,
|
|
|
+ .ndo_change_mtu = velocity_change_mtu,
|
|
|
+ .ndo_do_ioctl = velocity_ioctl,
|
|
|
+ .ndo_vlan_rx_add_vid = velocity_vlan_rx_add_vid,
|
|
|
+ .ndo_vlan_rx_kill_vid = velocity_vlan_rx_kill_vid,
|
|
|
+ .ndo_vlan_rx_register = velocity_vlan_rx_register,
|
|
|
+};
|
|
|
+
|
|
|
/**
|
|
|
- * velocity_ethtool_up - pre hook for ethtool
|
|
|
- * @dev: network device
|
|
|
+ * velocity_init_info - init private data
|
|
|
+ * @pdev: PCI device
|
|
|
+ * @vptr: Velocity info
|
|
|
+ * @info: Board type
|
|
|
*
|
|
|
- * Called before an ethtool operation. We need to make sure the
|
|
|
- * chip is out of D3 state before we poke at it.
|
|
|
+ * Set up the initial velocity_info struct for the device that has been
|
|
|
+ * discovered.
|
|
|
*/
|
|
|
-
|
|
|
-static int velocity_ethtool_up(struct net_device *dev)
|
|
|
+static void __devinit velocity_init_info(struct pci_dev *pdev,
|
|
|
+ struct velocity_info *vptr,
|
|
|
+ const struct velocity_info_tbl *info)
|
|
|
{
|
|
|
- struct velocity_info *vptr = netdev_priv(dev);
|
|
|
- if (!netif_running(dev))
|
|
|
- pci_set_power_state(vptr->pdev, PCI_D0);
|
|
|
- return 0;
|
|
|
+ memset(vptr, 0, sizeof(struct velocity_info));
|
|
|
+
|
|
|
+ vptr->pdev = pdev;
|
|
|
+ vptr->chip_id = info->chip_id;
|
|
|
+ vptr->tx.numq = info->txqueue;
|
|
|
+ vptr->multicast_limit = MCAM_SIZE;
|
|
|
+ spin_lock_init(&vptr->lock);
|
|
|
+ INIT_LIST_HEAD(&vptr->list);
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
- * velocity_ethtool_down - post hook for ethtool
|
|
|
- * @dev: network device
|
|
|
+ * velocity_get_pci_info - retrieve PCI info for device
|
|
|
+ * @vptr: velocity device
|
|
|
+ * @pdev: PCI device it matches
|
|
|
*
|
|
|
- * Called after an ethtool operation. Restore the chip back to D3
|
|
|
- * state if it isn't running.
|
|
|
+ * Retrieve the PCI configuration space data that interests us from
|
|
|
+ * the kernel PCI layer
|
|
|
*/
|
|
|
-
|
|
|
-static void velocity_ethtool_down(struct net_device *dev)
|
|
|
+static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev)
|
|
|
{
|
|
|
- struct velocity_info *vptr = netdev_priv(dev);
|
|
|
- if (!netif_running(dev))
|
|
|
- pci_set_power_state(vptr->pdev, PCI_D3hot);
|
|
|
-}
|
|
|
+ vptr->rev_id = pdev->revision;
|
|
|
|
|
|
-static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
|
|
|
-{
|
|
|
- struct velocity_info *vptr = netdev_priv(dev);
|
|
|
- struct mac_regs __iomem *regs = vptr->mac_regs;
|
|
|
- u32 status;
|
|
|
- status = check_connection_type(vptr->mac_regs);
|
|
|
+ pci_set_master(pdev);
|
|
|
|
|
|
- cmd->supported = SUPPORTED_TP |
|
|
|
- SUPPORTED_Autoneg |
|
|
|
- SUPPORTED_10baseT_Half |
|
|
|
- SUPPORTED_10baseT_Full |
|
|
|
- SUPPORTED_100baseT_Half |
|
|
|
- SUPPORTED_100baseT_Full |
|
|
|
- SUPPORTED_1000baseT_Half |
|
|
|
- SUPPORTED_1000baseT_Full;
|
|
|
- if (status & VELOCITY_SPEED_1000)
|
|
|
- cmd->speed = SPEED_1000;
|
|
|
- else if (status & VELOCITY_SPEED_100)
|
|
|
- cmd->speed = SPEED_100;
|
|
|
- else
|
|
|
- cmd->speed = SPEED_10;
|
|
|
- cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
|
|
|
- cmd->port = PORT_TP;
|
|
|
- cmd->transceiver = XCVR_INTERNAL;
|
|
|
- cmd->phy_address = readb(®s->MIIADR) & 0x1F;
|
|
|
+ vptr->ioaddr = pci_resource_start(pdev, 0);
|
|
|
+ vptr->memaddr = pci_resource_start(pdev, 1);
|
|
|
|
|
|
- if (status & VELOCITY_DUPLEX_FULL)
|
|
|
- cmd->duplex = DUPLEX_FULL;
|
|
|
- else
|
|
|
- cmd->duplex = DUPLEX_HALF;
|
|
|
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
|
|
|
+ dev_err(&pdev->dev,
|
|
|
+ "region #0 is not an I/O resource, aborting.\n");
|
|
|
+ return -EINVAL;
|
|
|
+ }
|
|
|
+
|
|
|
+ if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
|
|
|
+ dev_err(&pdev->dev,
|
|
|
+ "region #1 is an I/O resource, aborting.\n");
|
|
|
+ return -EINVAL;
|
|
|
+ }
|
|
|
+
|
|
|
+ if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) {
|
|
|
+ dev_err(&pdev->dev, "region #1 is too small.\n");
|
|
|
+ return -EINVAL;
|
|
|
+ }
|
|
|
+ vptr->pdev = pdev;
|
|
|
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
-static int velocity_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
|
|
|
+/**
|
|
|
+ * velocity_print_info - per-driver data
|
|
|
+ * @vptr: velocity
|
|
|
+ *
|
|
|
+ * Print per-driver data as the kernel driver finds Velocity
|
|
|
+ * hardware
|
|
|
+ */
|
|
|
+static void __devinit velocity_print_info(struct velocity_info *vptr)
|
|
|
{
|
|
|
- struct velocity_info *vptr = netdev_priv(dev);
|
|
|
- u32 curr_status;
|
|
|
- u32 new_status = 0;
|
|
|
- int ret = 0;
|
|
|
-
|
|
|
- curr_status = check_connection_type(vptr->mac_regs);
|
|
|
- curr_status &= (~VELOCITY_LINK_FAIL);
|
|
|
-
|
|
|
- new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
|
|
|
- new_status |= ((cmd->speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
|
|
|
- new_status |= ((cmd->speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
|
|
|
- new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0);
|
|
|
-
|
|
|
- if ((new_status & VELOCITY_AUTONEG_ENABLE) && (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE)))
|
|
|
- ret = -EINVAL;
|
|
|
- else
|
|
|
- velocity_set_media_mode(vptr, new_status);
|
|
|
+ struct net_device *dev = vptr->dev;
|
|
|
|
|
|
- return ret;
|
|
|
+ printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
|
|
|
+ printk(KERN_INFO "%s: Ethernet Address: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
|
|
|
+ dev->name,
|
|
|
+ dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
|
|
|
+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
|
|
|
}
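The six %2.2X conversions above predate wide use of the %pM extension; on kernels that have it, the address line collapses to the following (an optional simplification, not part of this patch):

	printk(KERN_INFO "%s: Ethernet Address: %pM\n", dev->name, dev->dev_addr);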
|
|
|
|
|
|
static u32 velocity_get_link(struct net_device *dev)
|
|
@@ -3016,195 +2647,165 @@ static u32 velocity_get_link(struct net_device *dev)
|
|
|
return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, ®s->PHYSR0) ? 1 : 0;
|
|
|
}
|
|
|
|
|
|
-static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
|
|
|
-{
|
|
|
- struct velocity_info *vptr = netdev_priv(dev);
|
|
|
- strcpy(info->driver, VELOCITY_NAME);
|
|
|
- strcpy(info->version, VELOCITY_VERSION);
|
|
|
- strcpy(info->bus_info, pci_name(vptr->pdev));
|
|
|
-}
|
|
|
|
|
|
-static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
|
|
|
+/**
|
|
|
+ * velocity_found1 - set up discovered velocity card
|
|
|
+ * @pdev: PCI device
|
|
|
+ * @ent: PCI device table entry that matched
|
|
|
+ *
|
|
|
+ * Configure a discovered adapter from scratch. Return a negative
|
|
|
+ * errno code on failure paths.
|
|
|
+ */
|
|
|
+static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|
|
{
|
|
|
- struct velocity_info *vptr = netdev_priv(dev);
|
|
|
- wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
|
|
|
- wol->wolopts |= WAKE_MAGIC;
|
|
|
- /*
|
|
|
- if (vptr->wol_opts & VELOCITY_WOL_PHY)
|
|
|
- wol.wolopts|=WAKE_PHY;
|
|
|
- */
|
|
|
- if (vptr->wol_opts & VELOCITY_WOL_UCAST)
|
|
|
- wol->wolopts |= WAKE_UCAST;
|
|
|
- if (vptr->wol_opts & VELOCITY_WOL_ARP)
|
|
|
- wol->wolopts |= WAKE_ARP;
|
|
|
- memcpy(&wol->sopass, vptr->wol_passwd, 6);
|
|
|
-}
|
|
|
+ static int first = 1;
|
|
|
+ struct net_device *dev;
|
|
|
+ int i;
|
|
|
+ const char *drv_string;
|
|
|
+ const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data];
|
|
|
+ struct velocity_info *vptr;
|
|
|
+ struct mac_regs __iomem *regs;
|
|
|
+ int ret = -ENOMEM;
|
|
|
|
|
|
-static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
|
|
|
-{
|
|
|
- struct velocity_info *vptr = netdev_priv(dev);
|
|
|
+ /* FIXME: this driver, like almost all other ethernet drivers,
|
|
|
+ * can support more than MAX_UNITS.
|
|
|
+ */
|
|
|
+ if (velocity_nics >= MAX_UNITS) {
|
|
|
+ dev_notice(&pdev->dev, "already found %d NICs.\n",
|
|
|
+ velocity_nics);
|
|
|
+ return -ENODEV;
|
|
|
+ }
|
|
|
|
|
|
- if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
|
|
|
- return -EFAULT;
|
|
|
- vptr->wol_opts = VELOCITY_WOL_MAGIC;
|
|
|
+ dev = alloc_etherdev(sizeof(struct velocity_info));
|
|
|
+ if (!dev) {
|
|
|
+ dev_err(&pdev->dev, "allocate net device failed.\n");
|
|
|
+ goto out;
|
|
|
+ }
|
|
|
|
|
|
- /*
|
|
|
- if (wol.wolopts & WAKE_PHY) {
|
|
|
- vptr->wol_opts|=VELOCITY_WOL_PHY;
|
|
|
- vptr->flags |=VELOCITY_FLAGS_WOL_ENABLED;
|
|
|
- }
|
|
|
- */
|
|
|
+ /* Chain it all together */
|
|
|
|
|
|
- if (wol->wolopts & WAKE_MAGIC) {
|
|
|
- vptr->wol_opts |= VELOCITY_WOL_MAGIC;
|
|
|
- vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
|
|
|
+ SET_NETDEV_DEV(dev, &pdev->dev);
|
|
|
+ vptr = netdev_priv(dev);
|
|
|
+
|
|
|
+
|
|
|
+ if (first) {
|
|
|
+ printk(KERN_INFO "%s Ver. %s\n",
|
|
|
+ VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
|
|
|
+ printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
|
|
|
+ printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n");
|
|
|
+ first = 0;
|
|
|
+ }
|
|
|
+
|
|
|
+ velocity_init_info(pdev, vptr, info);
|
|
|
+
|
|
|
+ vptr->dev = dev;
|
|
|
+
|
|
|
+ dev->irq = pdev->irq;
|
|
|
+
|
|
|
+ ret = pci_enable_device(pdev);
|
|
|
+ if (ret < 0)
|
|
|
+ goto err_free_dev;
|
|
|
+
|
|
|
+ ret = velocity_get_pci_info(vptr, pdev);
|
|
|
+ if (ret < 0) {
|
|
|
+ /* error message already printed */
|
|
|
+ goto err_disable;
|
|
|
}
|
|
|
- if (wol->wolopts & WAKE_UCAST) {
|
|
|
- vptr->wol_opts |= VELOCITY_WOL_UCAST;
|
|
|
- vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
|
|
|
+
|
|
|
+ ret = pci_request_regions(pdev, VELOCITY_NAME);
|
|
|
+ if (ret < 0) {
|
|
|
+ dev_err(&pdev->dev, "No PCI resources.\n");
|
|
|
+ goto err_disable;
|
|
|
}
|
|
|
- if (wol->wolopts & WAKE_ARP) {
|
|
|
- vptr->wol_opts |= VELOCITY_WOL_ARP;
|
|
|
- vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
|
|
|
+
|
|
|
+ regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
|
|
|
+ if (regs == NULL) {
|
|
|
+ ret = -EIO;
|
|
|
+ goto err_release_res;
|
|
|
}
|
|
|
- memcpy(vptr->wol_passwd, wol->sopass, 6);
|
|
|
- return 0;
|
|
|
-}
|
|
|
|
|
|
-static u32 velocity_get_msglevel(struct net_device *dev)
|
|
|
-{
|
|
|
- return msglevel;
|
|
|
-}
|
|
|
+ vptr->mac_regs = regs;
|
|
|
|
|
|
-static void velocity_set_msglevel(struct net_device *dev, u32 value)
|
|
|
-{
|
|
|
- msglevel = value;
|
|
|
-}
|
|
|
+ mac_wol_reset(regs);
|
|
|
|
|
|
-static const struct ethtool_ops velocity_ethtool_ops = {
|
|
|
- .get_settings = velocity_get_settings,
|
|
|
- .set_settings = velocity_set_settings,
|
|
|
- .get_drvinfo = velocity_get_drvinfo,
|
|
|
- .get_wol = velocity_ethtool_get_wol,
|
|
|
- .set_wol = velocity_ethtool_set_wol,
|
|
|
- .get_msglevel = velocity_get_msglevel,
|
|
|
- .set_msglevel = velocity_set_msglevel,
|
|
|
- .get_link = velocity_get_link,
|
|
|
- .begin = velocity_ethtool_up,
|
|
|
- .complete = velocity_ethtool_down
|
|
|
-};
|
|
|
+ dev->base_addr = vptr->ioaddr;
|
|
|
|
|
|
-/**
|
|
|
- * velocity_mii_ioctl - MII ioctl handler
|
|
|
- * @dev: network device
|
|
|
- * @ifr: the ifreq block for the ioctl
|
|
|
- * @cmd: the command
|
|
|
- *
|
|
|
- * Process MII requests made via ioctl from the network layer. These
|
|
|
- * are used by tools like kudzu to interrogate the link state of the
|
|
|
- * hardware
|
|
|
- */
|
|
|
+ for (i = 0; i < 6; i++)
|
|
|
+ dev->dev_addr[i] = readb(®s->PAR[i]);
|
|
|
|
|
|
-static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
|
|
|
-{
|
|
|
- struct velocity_info *vptr = netdev_priv(dev);
|
|
|
- struct mac_regs __iomem *regs = vptr->mac_regs;
|
|
|
- unsigned long flags;
|
|
|
- struct mii_ioctl_data *miidata = if_mii(ifr);
|
|
|
- int err;
|
|
|
|
|
|
- switch (cmd) {
|
|
|
- case SIOCGMIIPHY:
|
|
|
- miidata->phy_id = readb(®s->MIIADR) & 0x1f;
|
|
|
- break;
|
|
|
- case SIOCGMIIREG:
|
|
|
- if (!capable(CAP_NET_ADMIN))
|
|
|
- return -EPERM;
|
|
|
- if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
|
|
|
- return -ETIMEDOUT;
|
|
|
- break;
|
|
|
- case SIOCSMIIREG:
|
|
|
- if (!capable(CAP_NET_ADMIN))
|
|
|
- return -EPERM;
|
|
|
- spin_lock_irqsave(&vptr->lock, flags);
|
|
|
- err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
|
|
|
- spin_unlock_irqrestore(&vptr->lock, flags);
|
|
|
- check_connection_type(vptr->mac_regs);
|
|
|
- if (err)
|
|
|
- return err;
|
|
|
- break;
|
|
|
- default:
|
|
|
- return -EOPNOTSUPP;
|
|
|
- }
|
|
|
- return 0;
|
|
|
-}
|
|
|
+ drv_string = dev_driver_string(&pdev->dev);
|
|
|
|
|
|
-#ifdef CONFIG_PM
|
|
|
+ velocity_get_options(&vptr->options, velocity_nics, drv_string);
|
|
|
|
|
|
-/**
|
|
|
- * velocity_save_context - save registers
|
|
|
- * @vptr: velocity
|
|
|
- * @context: buffer for stored context
|
|
|
- *
|
|
|
- * Retrieve the current configuration from the velocity hardware
|
|
|
- * and stash it in the context structure, for use by the context
|
|
|
- * restore functions. This allows us to save things we need across
|
|
|
- * power down states
|
|
|
- */
|
|
|
+ /*
|
|
|
+ * Mask out the options that cannot be set on the chip
|
|
|
+ */
|
|
|
|
|
|
-static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context)
|
|
|
-{
|
|
|
- struct mac_regs __iomem *regs = vptr->mac_regs;
|
|
|
- u16 i;
|
|
|
- u8 __iomem *ptr = (u8 __iomem *)regs;
|
|
|
+ vptr->options.flags &= info->flags;
|
|
|
|
|
|
- for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
|
|
|
- *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
|
|
|
+ /*
|
|
|
+ * Enable the chip-specified capabilities
|
|
|
+ */
|
|
|
|
|
|
- for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
|
|
|
- *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
|
|
|
+ vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
|
|
|
|
|
|
- for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
|
|
|
- *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
|
|
|
+ vptr->wol_opts = vptr->options.wol_opts;
|
|
|
+ vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
|
|
|
|
|
|
-}
|
|
|
+ vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
|
|
|
|
|
|
-/**
|
|
|
- * velocity_restore_context - restore registers
|
|
|
- * @vptr: velocity
|
|
|
- * @context: buffer for stored context
|
|
|
- *
|
|
|
- * Reload the register configuration from the velocity context
|
|
|
- * created by velocity_save_context.
|
|
|
- */
|
|
|
+ dev->irq = pdev->irq;
|
|
|
+ dev->netdev_ops = &velocity_netdev_ops;
|
|
|
+ dev->ethtool_ops = &velocity_ethtool_ops;
|
|
|
|
|
|
-static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
|
|
|
-{
|
|
|
- struct mac_regs __iomem *regs = vptr->mac_regs;
|
|
|
- int i;
|
|
|
- u8 __iomem *ptr = (u8 __iomem *)regs;
|
|
|
+ dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
|
|
|
+ NETIF_F_HW_VLAN_RX;
|
|
|
|
|
|
- for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4)
|
|
|
- writel(*((u32 *) (context->mac_reg + i)), ptr + i);
|
|
|
+ if (vptr->flags & VELOCITY_FLAGS_TX_CSUM)
|
|
|
+ dev->features |= NETIF_F_IP_CSUM;
|
|
|
|
|
|
- /* Just skip cr0 */
|
|
|
- for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
|
|
|
- /* Clear */
|
|
|
- writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
|
|
|
- /* Set */
|
|
|
- writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
|
|
|
+ ret = register_netdev(dev);
|
|
|
+ if (ret < 0)
|
|
|
+ goto err_iounmap;
|
|
|
+
|
|
|
+ if (!velocity_get_link(dev)) {
|
|
|
+ netif_carrier_off(dev);
|
|
|
+ vptr->mii_status |= VELOCITY_LINK_FAIL;
|
|
|
}
|
|
|
|
|
|
- for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4)
|
|
|
- writel(*((u32 *) (context->mac_reg + i)), ptr + i);
|
|
|
+ velocity_print_info(vptr);
|
|
|
+ pci_set_drvdata(pdev, dev);
|
|
|
|
|
|
- for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
|
|
|
- writel(*((u32 *) (context->mac_reg + i)), ptr + i);
|
|
|
+ /* and leave the chip powered down */
|
|
|
|
|
|
- for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++)
|
|
|
- writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
|
|
|
+ pci_set_power_state(pdev, PCI_D3hot);
|
|
|
+#ifdef CONFIG_PM
|
|
|
+ {
|
|
|
+ unsigned long flags;
|
|
|
+
|
|
|
+ spin_lock_irqsave(&velocity_dev_list_lock, flags);
|
|
|
+ list_add(&vptr->list, &velocity_dev_list);
|
|
|
+ spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
|
|
|
+ }
|
|
|
+#endif
|
|
|
+ velocity_nics++;
|
|
|
+out:
|
|
|
+ return ret;
|
|
|
+
|
|
|
+err_iounmap:
|
|
|
+ iounmap(regs);
|
|
|
+err_release_res:
|
|
|
+ pci_release_regions(pdev);
|
|
|
+err_disable:
|
|
|
+ pci_disable_device(pdev);
|
|
|
+err_free_dev:
|
|
|
+ free_netdev(dev);
|
|
|
+ goto out;
|
|
|
}
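velocity_remove1(), referenced by the pci_driver definition below but outside this hunk, is assumed to unwind in the reverse order of velocity_found1(); a sketch of its expected shape (the CONFIG_PM device-list removal is elided):

	static void __devexit velocity_remove1(struct pci_dev *pdev)
	{
		struct net_device *dev = pci_get_drvdata(pdev);
		struct velocity_info *vptr = netdev_priv(dev);

		/* Reverse of velocity_found1(): netdev first, PCI last. */
		unregister_netdev(dev);
		iounmap(vptr->mac_regs);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
	}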
|
|
|
|
|
|
+
|
|
|
+#ifdef CONFIG_PM
|
|
|
/**
|
|
|
* wol_calc_crc - WOL CRC
|
|
|
* @pattern: data pattern
|
|
@@ -3213,7 +2814,6 @@ static void velocity_restore_context(struct velocity_info *vptr, struct velocity
|
|
|
* Compute the wake on lan crc hashes for the packet header
|
|
|
* we are interested in.
|
|
|
*/
|
|
|
-
|
|
|
static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
|
|
|
{
|
|
|
u16 crc = 0xFFFF;
|
|
@@ -3250,7 +2850,6 @@ static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
|
|
|
*
|
|
|
* FIXME: check static buffer is safe here
|
|
|
*/
|
|
|
-
|
|
|
static int velocity_set_wol(struct velocity_info *vptr)
|
|
|
{
|
|
|
struct mac_regs __iomem *regs = vptr->mac_regs;
|
|
@@ -3327,6 +2926,33 @@ static int velocity_set_wol(struct velocity_info *vptr)
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
+/**
|
|
|
+ * velocity_save_context - save registers
|
|
|
+ * @vptr: velocity
|
|
|
+ * @context: buffer for stored context
|
|
|
+ *
|
|
|
+ * Retrieve the current configuration from the velocity hardware
|
|
|
+ * and stash it in the context structure, for use by the context
|
|
|
+ * restore functions. This allows us to save things we need across
|
|
|
+ * power down states
|
|
|
+ */
|
|
|
+static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context)
|
|
|
+{
|
|
|
+ struct mac_regs __iomem *regs = vptr->mac_regs;
|
|
|
+ u16 i;
|
|
|
+ u8 __iomem *ptr = (u8 __iomem *)regs;
|
|
|
+
|
|
|
+ for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
|
|
|
+ *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
|
|
|
+
|
|
|
+ for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
|
|
|
+ *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
|
|
|
+
|
|
|
+ for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
|
|
|
+ *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
|
|
|
+
|
|
|
+}
|
|
|
+
|
|
|
static int velocity_suspend(struct pci_dev *pdev, pm_message_t state)
|
|
|
{
|
|
|
struct net_device *dev = pci_get_drvdata(pdev);
|
|
@@ -3354,11 +2980,46 @@ static int velocity_suspend(struct pci_dev *pdev, pm_message_t state)
|
|
|
pci_disable_device(pdev);
|
|
|
pci_set_power_state(pdev, pci_choose_state(pdev, state));
|
|
|
}
|
|
|
-#else
|
|
|
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
|
|
|
-#endif
|
|
|
- spin_unlock_irqrestore(&vptr->lock, flags);
|
|
|
- return 0;
|
|
|
+#else
|
|
|
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
|
|
|
+#endif
|
|
|
+ spin_unlock_irqrestore(&vptr->lock, flags);
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * velocity_restore_context - restore registers
|
|
|
+ * @vptr: velocity
|
|
|
+ * @context: buffer for stored context
|
|
|
+ *
|
|
|
+ * Reload the register configuration from the velocity context
|
|
|
+ * created by velocity_save_context.
|
|
|
+ */
|
|
|
+static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
|
|
|
+{
|
|
|
+ struct mac_regs __iomem *regs = vptr->mac_regs;
|
|
|
+ int i;
|
|
|
+ u8 __iomem *ptr = (u8 __iomem *)regs;
|
|
|
+
|
|
|
+ for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4)
|
|
|
+ writel(*((u32 *) (context->mac_reg + i)), ptr + i);
|
|
|
+
|
|
|
+ /* Just skip cr0 */
|
|
|
+ for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
|
|
|
+ /* Clear */
|
|
|
+ writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
|
|
|
+ /* Set */
|
|
|
+ writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
|
|
|
+ }
|
|
|
+
|
|
|
+ for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4)
|
|
|
+ writel(*((u32 *) (context->mac_reg + i)), ptr + i);
|
|
|
+
|
|
|
+ for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
|
|
|
+ writel(*((u32 *) (context->mac_reg + i)), ptr + i);
|
|
|
+
|
|
|
+ for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++)
|
|
|
+ writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
|
|
|
}
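The byte loop in velocity_restore_context() leans on the Velocity's paired Set/Clear registers: a 1 written to the Set register at offset i turns bits on, and a 1 written to the Clear register 4 bytes later turns bits off, so writing the complement to Clear and then the saved value to Set reproduces the byte exactly. The idiom in isolation (a sketch, assuming that register layout):

	/* Restore one byte through a Set/Clear register pair (sketch). */
	static void restore_set_clear_byte(u8 __iomem *set_reg, u8 saved)
	{
		writeb(~saved, set_reg + 4);	/* clear every bit that should be 0 */
		writeb(saved, set_reg);		/* set every bit that should be 1 */
	}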
|
|
|
|
|
|
static int velocity_resume(struct pci_dev *pdev)
|
|
@@ -3395,9 +3056,190 @@ static int velocity_resume(struct pci_dev *pdev)
|
|
|
|
|
|
return 0;
|
|
|
}
|
|
|
+#endif
|
|
|
|
|
|
-#ifdef CONFIG_INET
|
|
|
+/*
|
|
|
+ * Definition for our device driver. The PCI layer interface
|
|
|
+ * uses this to handle all our card discovery and plugging
|
|
|
+ */
|
|
|
+static struct pci_driver velocity_driver = {
|
|
|
+ .name = VELOCITY_NAME,
|
|
|
+ .id_table = velocity_id_table,
|
|
|
+ .probe = velocity_found1,
|
|
|
+ .remove = __devexit_p(velocity_remove1),
|
|
|
+#ifdef CONFIG_PM
|
|
|
+ .suspend = velocity_suspend,
|
|
|
+ .resume = velocity_resume,
|
|
|
+#endif
|
|
|
+};
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * velocity_ethtool_up - pre hook for ethtool
|
|
|
+ * @dev: network device
|
|
|
+ *
|
|
|
+ * Called before an ethtool operation. We need to make sure the
|
|
|
+ * chip is out of D3 state before we poke at it.
|
|
|
+ */
|
|
|
+static int velocity_ethtool_up(struct net_device *dev)
|
|
|
+{
|
|
|
+ struct velocity_info *vptr = netdev_priv(dev);
|
|
|
+ if (!netif_running(dev))
|
|
|
+ pci_set_power_state(vptr->pdev, PCI_D0);
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * velocity_ethtool_down - post hook for ethtool
|
|
|
+ * @dev: network device
|
|
|
+ *
|
|
|
+ * Called after an ethtool operation. Restore the chip back to D3
|
|
|
+ * state if it isn't running.
|
|
|
+ */
|
|
|
+static void velocity_ethtool_down(struct net_device *dev)
|
|
|
+{
|
|
|
+ struct velocity_info *vptr = netdev_priv(dev);
|
|
|
+ if (!netif_running(dev))
|
|
|
+ pci_set_power_state(vptr->pdev, PCI_D3hot);
|
|
|
+}
|
|
|
+
|
|
|
+static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
|
|
|
+{
|
|
|
+ struct velocity_info *vptr = netdev_priv(dev);
|
|
|
+ struct mac_regs __iomem *regs = vptr->mac_regs;
|
|
|
+ u32 status;
|
|
|
+ status = check_connection_type(vptr->mac_regs);
|
|
|
+
|
|
|
+ cmd->supported = SUPPORTED_TP |
|
|
|
+ SUPPORTED_Autoneg |
|
|
|
+ SUPPORTED_10baseT_Half |
|
|
|
+ SUPPORTED_10baseT_Full |
|
|
|
+ SUPPORTED_100baseT_Half |
|
|
|
+ SUPPORTED_100baseT_Full |
|
|
|
+ SUPPORTED_1000baseT_Half |
|
|
|
+ SUPPORTED_1000baseT_Full;
|
|
|
+ if (status & VELOCITY_SPEED_1000)
|
|
|
+ cmd->speed = SPEED_1000;
|
|
|
+ else if (status & VELOCITY_SPEED_100)
|
|
|
+ cmd->speed = SPEED_100;
|
|
|
+ else
|
|
|
+ cmd->speed = SPEED_10;
|
|
|
+ cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
|
|
|
+ cmd->port = PORT_TP;
|
|
|
+ cmd->transceiver = XCVR_INTERNAL;
|
|
|
+ cmd->phy_address = readb(®s->MIIADR) & 0x1F;
|
|
|
+
|
|
|
+ if (status & VELOCITY_DUPLEX_FULL)
|
|
|
+ cmd->duplex = DUPLEX_FULL;
|
|
|
+ else
|
|
|
+ cmd->duplex = DUPLEX_HALF;
|
|
|
+
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
+static int velocity_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
|
|
|
+{
|
|
|
+ struct velocity_info *vptr = netdev_priv(dev);
|
|
|
+ u32 curr_status;
|
|
|
+ u32 new_status = 0;
|
|
|
+ int ret = 0;
|
|
|
+
|
|
|
+ curr_status = check_connection_type(vptr->mac_regs);
|
|
|
+ curr_status &= (~VELOCITY_LINK_FAIL);
|
|
|
+
|
|
|
+ new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
|
|
|
+ new_status |= ((cmd->speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
|
|
|
+ new_status |= ((cmd->speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
|
|
|
+ new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0);
|
|
|
+
|
|
|
+ if ((new_status & VELOCITY_AUTONEG_ENABLE) && (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE)))
|
|
|
+ ret = -EINVAL;
|
|
|
+ else
|
|
|
+ velocity_set_media_mode(vptr, new_status);
|
|
|
+
|
|
|
+ return ret;
|
|
|
+}
|
|
|
+
|
|
|
+static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
|
|
|
+{
|
|
|
+ struct velocity_info *vptr = netdev_priv(dev);
|
|
|
+ strcpy(info->driver, VELOCITY_NAME);
|
|
|
+ strcpy(info->version, VELOCITY_VERSION);
|
|
|
+ strcpy(info->bus_info, pci_name(vptr->pdev));
|
|
|
+}
|
|
|
+
|
|
|
+static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
|
|
|
+{
|
|
|
+ struct velocity_info *vptr = netdev_priv(dev);
|
|
|
+ wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
|
|
|
+ wol->wolopts |= WAKE_MAGIC;
|
|
|
+ /*
|
|
|
+ if (vptr->wol_opts & VELOCITY_WOL_PHY)
|
|
|
+ wol.wolopts|=WAKE_PHY;
|
|
|
+ */
|
|
|
+ if (vptr->wol_opts & VELOCITY_WOL_UCAST)
|
|
|
+ wol->wolopts |= WAKE_UCAST;
|
|
|
+ if (vptr->wol_opts & VELOCITY_WOL_ARP)
|
|
|
+ wol->wolopts |= WAKE_ARP;
|
|
|
+ memcpy(&wol->sopass, vptr->wol_passwd, 6);
|
|
|
+}
|
|
|
+
|
|
|
+static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
|
|
|
+{
|
|
|
+ struct velocity_info *vptr = netdev_priv(dev);
|
|
|
+
|
|
|
+ if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
|
|
|
+ return -EFAULT;
|
|
|
+ vptr->wol_opts = VELOCITY_WOL_MAGIC;
|
|
|
+
|
|
|
+ /*
|
|
|
+ if (wol.wolopts & WAKE_PHY) {
|
|
|
+ vptr->wol_opts|=VELOCITY_WOL_PHY;
|
|
|
+ vptr->flags |=VELOCITY_FLAGS_WOL_ENABLED;
|
|
|
+ }
|
|
|
+ */
|
|
|
+
|
|
|
+ if (wol->wolopts & WAKE_MAGIC) {
|
|
|
+ vptr->wol_opts |= VELOCITY_WOL_MAGIC;
|
|
|
+ vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
|
|
|
+ }
|
|
|
+ if (wol->wolopts & WAKE_UCAST) {
|
|
|
+ vptr->wol_opts |= VELOCITY_WOL_UCAST;
|
|
|
+ vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
|
|
|
+ }
|
|
|
+ if (wol->wolopts & WAKE_ARP) {
|
|
|
+ vptr->wol_opts |= VELOCITY_WOL_ARP;
|
|
|
+ vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
|
|
|
+ }
|
|
|
+ memcpy(vptr->wol_passwd, wol->sopass, 6);
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
+static u32 velocity_get_msglevel(struct net_device *dev)
|
|
|
+{
|
|
|
+ return msglevel;
|
|
|
+}
|
|
|
+
|
|
|
+static void velocity_set_msglevel(struct net_device *dev, u32 value)
|
|
|
+{
|
|
|
+ msglevel = value;
|
|
|
+}
|
|
|
+
|
|
|
+static const struct ethtool_ops velocity_ethtool_ops = {
|
|
|
+ .get_settings = velocity_get_settings,
|
|
|
+ .set_settings = velocity_set_settings,
|
|
|
+ .get_drvinfo = velocity_get_drvinfo,
|
|
|
+ .get_wol = velocity_ethtool_get_wol,
|
|
|
+ .set_wol = velocity_ethtool_set_wol,
|
|
|
+ .get_msglevel = velocity_get_msglevel,
|
|
|
+ .set_msglevel = velocity_set_msglevel,
|
|
|
+ .get_link = velocity_get_link,
|
|
|
+ .begin = velocity_ethtool_up,
|
|
|
+ .complete = velocity_ethtool_down
|
|
|
+};
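User space reaches the velocity_ethtool_ops handlers (bracketed by the .begin/.complete D-state hooks above) through the SIOCETHTOOL ioctl; a hypothetical sketch of querying link state, which lands in velocity_get_link():

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	static int ethtool_link_up(int sock, const char *ifname)
	{
		struct ifreq ifr;
		struct ethtool_value ev = { .cmd = ETHTOOL_GLINK };

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&ev;

		if (ioctl(sock, SIOCETHTOOL, &ifr) < 0)
			return -1;
		return ev.data;		/* 1 when PHYSR0_LINKGD is set */
	}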
|
|
|
|
|
|
+#ifdef CONFIG_PM
|
|
|
+#ifdef CONFIG_INET
|
|
|
static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
|
|
|
{
|
|
|
struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
|
|
@@ -3419,6 +3261,63 @@ static int velocity_netdev_event(struct notifier_block *nb, unsigned long notifi
|
|
|
|
|
|
return NOTIFY_DONE;
|
|
|
}
|
|
|
+#endif /* CONFIG_INET */
|
|
|
+#endif /* CONFIG_PM */
|
|
|
|
|
|
-#endif
|
|
|
-#endif
|
|
|
+#if defined(CONFIG_PM) && defined(CONFIG_INET)
|
|
|
+static struct notifier_block velocity_inetaddr_notifier = {
|
|
|
+ .notifier_call = velocity_netdev_event,
|
|
|
+};
|
|
|
+
|
|
|
+static void velocity_register_notifier(void)
|
|
|
+{
|
|
|
+ register_inetaddr_notifier(&velocity_inetaddr_notifier);
|
|
|
+}
|
|
|
+
|
|
|
+static void velocity_unregister_notifier(void)
|
|
|
+{
|
|
|
+ unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
|
|
|
+}
|
|
|
+
|
|
|
+#else
|
|
|
+
|
|
|
+#define velocity_register_notifier() do {} while (0)
|
|
|
+#define velocity_unregister_notifier() do {} while (0)
|
|
|
+
|
|
|
+#endif /* defined(CONFIG_PM) && defined(CONFIG_INET) */
|
|
|
+
|
|
|
+/**
|
|
|
+ * velocity_init_module - load time function
|
|
|
+ *
|
|
|
+ * Called when the velocity module is loaded. The PCI driver
|
|
|
+ * is registered with the PCI layer, and in turn will call
|
|
|
+ * the probe functions for each velocity adapter installed
|
|
|
+ * in the system.
|
|
|
+ */
|
|
|
+static int __init velocity_init_module(void)
|
|
|
+{
|
|
|
+ int ret;
|
|
|
+
|
|
|
+ velocity_register_notifier();
|
|
|
+ ret = pci_register_driver(&velocity_driver);
|
|
|
+ if (ret < 0)
|
|
|
+ velocity_unregister_notifier();
|
|
|
+ return ret;
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * velocity_cleanup_module - module unload
|
|
|
+ *
|
|
|
+ * When the velocity module is unloaded this function is called.
|
|
|
+ * It will clean up the notifiers and then unregister the PCI
|
|
|
+ * driver interface for this hardware. This in turn cleans up
|
|
|
+ * all discovered interfaces before returning from the function
|
|
|
+ */
|
|
|
+static void __exit velocity_cleanup_module(void)
|
|
|
+{
|
|
|
+ velocity_unregister_notifier();
|
|
|
+ pci_unregister_driver(&velocity_driver);
|
|
|
+}
|
|
|
+
|
|
|
+module_init(velocity_init_module);
|
|
|
+module_exit(velocity_cleanup_module);
|