@@ -123,6 +123,12 @@ static inline void greth_enable_tx(struct greth_private *greth)
 	GRETH_REGORIN(greth->regs->control, GRETH_TXEN);
 }
 
+static inline void greth_enable_tx_and_irq(struct greth_private *greth)
+{
+	wmb(); /* BDs must have been written to memory before enabling TX */
+	GRETH_REGORIN(greth->regs->control, GRETH_TXEN | GRETH_TXI);
+}
+
 static inline void greth_disable_tx(struct greth_private *greth)
 {
 	GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN);
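
The helper exists so the barrier and the control-register write cannot be separated: the buffer descriptors must be visible in memory before the MAC is told to fetch them, and the TX completion interrupt is armed in the same register access. Below is a minimal userspace model of that publish ordering, a sketch only: a C11 release fence stands in for the kernel's wmb(), and FAKE_TXEN/FAKE_TXI are invented placeholder bits, not the driver's GRETH_* values.

#include <stdatomic.h>
#include <stdint.h>

#define FAKE_TXEN 0x1u	/* placeholder, not the real GRETH_TXEN value */
#define FAKE_TXI  0x2u	/* placeholder, not the real GRETH_TXI value */

struct fake_bd { uint32_t stat; uint32_t addr; };	/* models struct greth_bd */

static struct fake_bd ring[8];
static volatile uint32_t control;	/* models greth->regs->control */

static void enable_tx_and_irq(void)
{
	/* BDs must have been written to memory before enabling TX */
	atomic_thread_fence(memory_order_release);	/* plays the role of wmb() */
	control |= FAKE_TXEN | FAKE_TXI;
}

int main(void)
{
	ring[0].addr = 0x1000;	/* descriptor is filled in first ... */
	ring[0].stat = 0x800;
	enable_tx_and_irq();	/* ... and only then is the MAC kicked */
	return (control & FAKE_TXEN) ? 0 : 1;
}

On real hardware the control write is an MMIO access with ordering rules of its own; the sketch only illustrates the compiler/CPU ordering that the wmb() enforces.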
@@ -447,29 +453,30 @@ out:
 	return err;
 }
 
+static inline u16 greth_num_free_bds(u16 tx_last, u16 tx_next)
+{
+	if (tx_next < tx_last)
+		return (tx_last - tx_next) - 1;
+	else
+		return GRETH_TXBD_NUM - (tx_next - tx_last) - 1;
+}
+
 static netdev_tx_t
 greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct greth_private *greth = netdev_priv(dev);
 	struct greth_bd *bdp;
-	u32 status = 0, dma_addr, ctrl;
+	u32 status, dma_addr;
 	int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
 	unsigned long flags;
+	u16 tx_last;
 
 	nr_frags = skb_shinfo(skb)->nr_frags;
+	tx_last = greth->tx_last;
+	rmb(); /* tx_last is updated by the poll task */
 
-	/* Clean TX Ring */
-	greth_clean_tx_gbit(dev);
-
-	if (greth->tx_free < nr_frags + 1) {
-		spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
-		ctrl = GRETH_REGLOAD(greth->regs->control);
-		/* Enable TX IRQ only if not already in poll() routine */
-		if (ctrl & GRETH_RXI)
-			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
+	if (greth_num_free_bds(tx_last, greth->tx_next) < nr_frags + 1) {
 		netif_stop_queue(dev);
-		spin_unlock_irqrestore(&greth->devlock, flags);
 		err = NETDEV_TX_BUSY;
 		goto out;
 	}
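
greth_num_free_bds() replaces the shared tx_free counter with arithmetic over the two ring indices. The ring always keeps one slot unused, so tx_last == tx_next can only mean "empty" and the helper never reports more than GRETH_TXBD_NUM - 1 free descriptors. A standalone sketch of the same arithmetic, with RING_SIZE as a stand-in for the driver's GRETH_TXBD_NUM:

#include <assert.h>
#include <stdint.h>

#define RING_SIZE 128	/* stand-in for GRETH_TXBD_NUM */

static uint16_t num_free_bds(uint16_t tx_last, uint16_t tx_next)
{
	if (tx_next < tx_last)
		return (tx_last - tx_next) - 1;
	return RING_SIZE - (tx_next - tx_last) - 1;
}

int main(void)
{
	assert(num_free_bds(0, 0) == RING_SIZE - 1);	/* empty ring */
	assert(num_free_bds(10, 9) == 0);	/* full: producer just behind cleaner */
	assert(num_free_bds(5, 120) == 12);	/* producer well ahead of cleaner */
	return 0;
}

Because xmit only ever advances tx_next and the cleaner only ever advances tx_last, each side may read the other's index without taking devlock; the rmb() above pairs with the wmb() the cleaner issues before publishing tx_last.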
@@ -488,6 +495,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 	/* Linear buf */
 	if (nr_frags != 0)
 		status = GRETH_TXBD_MORE;
+	else
+		status = GRETH_BD_IE;
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
 		status |= GRETH_TXBD_CSALL;
@@ -545,14 +554,12 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 
 	/* Enable the descriptor chain by enabling the first descriptor */
 	bdp = greth->tx_bd_base + greth->tx_next;
-	greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
-	greth->tx_next = curr_tx;
-	greth->tx_free -= nr_frags + 1;
-
-	wmb();
+	greth_write_bd(&bdp->stat,
+		       greth_read_bd(&bdp->stat) | GRETH_BD_EN);
 
 	spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
-	greth_enable_tx(greth);
+	greth->tx_next = curr_tx;
+	greth_enable_tx_and_irq(greth);
 	spin_unlock_irqrestore(&greth->devlock, flags);
 
 	return NETDEV_TX_OK;
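
Relative to the old code, tx_next is now published while devlock is held and the standalone wmb() is gone: the barrier lives inside greth_enable_tx_and_irq(), ordering the descriptor writes against the TX start, while the xmit path's snapshot of tx_last is ordered by the rmb() issued right after reading it. A sketch of those two pairings with C11 release/acquire operations in place of the driver's wmb()/rmb(); the function names are illustrative, not from the driver:

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint16_t tx_next;	/* advanced only by start_xmit */
static _Atomic uint16_t tx_last;	/* advanced only by the NAPI cleaner */

/* xmit side: the BD writes happen before this release store */
static void xmit_publish(uint16_t new_next)
{
	atomic_store_explicit(&tx_next, new_next, memory_order_release);
}

/* xmit side: pairs with the cleaner's release store; mirrors the
 * rmb() issued right after reading greth->tx_last */
static uint16_t xmit_read_tx_last(void)
{
	return atomic_load_explicit(&tx_last, memory_order_acquire);
}

/* cleaner side: unmap and free everything up to 'done' first; the
 * release mirrors the wmb() before greth->tx_last = tx_last */
static void cleaner_publish(uint16_t done)
{
	atomic_store_explicit(&tx_last, done, memory_order_release);
}

int main(void)
{
	xmit_publish(3);
	(void)xmit_read_tx_last();
	cleaner_publish(0);
	return 0;
}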
@@ -648,7 +655,6 @@ static void greth_clean_tx(struct net_device *dev)
 	if (greth->tx_free > 0) {
 		netif_wake_queue(dev);
 	}
-
 }
 
 static inline void greth_update_tx_stats(struct net_device *dev, u32 stat)
@@ -670,20 +676,22 @@ static void greth_clean_tx_gbit(struct net_device *dev)
 {
 	struct greth_private *greth;
 	struct greth_bd *bdp, *bdp_last_frag;
-	struct sk_buff *skb;
+	struct sk_buff *skb = NULL;
 	u32 stat;
 	int nr_frags, i;
+	u16 tx_last;
 
 	greth = netdev_priv(dev);
+	tx_last = greth->tx_last;
 
-	while (greth->tx_free < GRETH_TXBD_NUM) {
+	while (tx_last != greth->tx_next) {
 
-		skb = greth->tx_skbuff[greth->tx_last];
+		skb = greth->tx_skbuff[tx_last];
 
 		nr_frags = skb_shinfo(skb)->nr_frags;
 
 		/* We only clean fully completed SKBs */
-		bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
+		bdp_last_frag = greth->tx_bd_base + SKIP_TX(tx_last, nr_frags);
 
 		GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
 		mb();
@@ -692,14 +700,14 @@ static void greth_clean_tx_gbit(struct net_device *dev)
 		if (stat & GRETH_BD_EN)
 			break;
 
-		greth->tx_skbuff[greth->tx_last] = NULL;
+		greth->tx_skbuff[tx_last] = NULL;
 
 		greth_update_tx_stats(dev, stat);
 		dev->stats.tx_bytes += skb->len;
 
-		bdp = greth->tx_bd_base + greth->tx_last;
+		bdp = greth->tx_bd_base + tx_last;
 
-		greth->tx_last = NEXT_TX(greth->tx_last);
+		tx_last = NEXT_TX(tx_last);
 
 		dma_unmap_single(greth->dev,
 				 greth_read_bd(&bdp->addr),
@@ -708,21 +716,26 @@ static void greth_clean_tx_gbit(struct net_device *dev)
 
 		for (i = 0; i < nr_frags; i++) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-			bdp = greth->tx_bd_base + greth->tx_last;
+			bdp = greth->tx_bd_base + tx_last;
 
 			dma_unmap_page(greth->dev,
 				       greth_read_bd(&bdp->addr),
 				       skb_frag_size(frag),
 				       DMA_TO_DEVICE);
 
-			greth->tx_last = NEXT_TX(greth->tx_last);
+			tx_last = NEXT_TX(tx_last);
 		}
-		greth->tx_free += nr_frags+1;
 		dev_kfree_skb(skb);
 	}
+	if (skb) { /* skb is set only if the above while loop was entered */
+		wmb();
+		greth->tx_last = tx_last;
 
-	if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1)))
-		netif_wake_queue(dev);
+		if (netif_queue_stopped(dev) &&
+		    (greth_num_free_bds(tx_last, greth->tx_next) >
+		    (MAX_SKB_FRAGS+1)))
+			netif_wake_queue(dev);
+	}
 }
 
 static int greth_rx(struct net_device *dev, int limit)
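
The cleaning loop now works entirely on a local copy of tx_last and publishes it exactly once, after a wmb(), so the xmit path can never observe recycled ring slots while their dma_unmap and dev_kfree_skb are still in progress; the if (skb) guard skips both the publish and the wake-queue check when the loop never claimed a descriptor. A compact, self-contained model of that loop shape, where slot_done[] and release_slot() are hypothetical stand-ins for the GRETH_BD_EN test and the unmapping:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define RING_SIZE 128	/* stand-in for GRETH_TXBD_NUM */
#define NEXT(i)	((uint16_t)(((i) + 1) % RING_SIZE))

static _Atomic uint16_t tx_next, tx_last;
static bool slot_done[RING_SIZE];	/* models "GRETH_BD_EN has been cleared" */

static void release_slot(uint16_t i)
{
	slot_done[i] = false;	/* the dma_unmap + dev_kfree_skb would go here */
}

static void clean_ring(void)
{
	uint16_t local = atomic_load_explicit(&tx_last, memory_order_relaxed);
	uint16_t next = atomic_load_explicit(&tx_next, memory_order_acquire);
	bool progressed = false;

	while (local != next && slot_done[local]) {
		release_slot(local);	/* safe: slot not yet visible as free */
		local = NEXT(local);
		progressed = true;
	}
	if (progressed)	/* mirrors the patch's if (skb) guard */
		atomic_store_explicit(&tx_last, local, memory_order_release);
}

int main(void)
{
	atomic_store_explicit(&tx_next, 2, memory_order_release);
	slot_done[0] = slot_done[1] = true;
	clean_ring();	/* recycles slots 0 and 1, then publishes tx_last = 2 */
	return atomic_load_explicit(&tx_last, memory_order_relaxed) == 2 ? 0 : 1;
}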
@@ -965,16 +978,12 @@ static int greth_poll(struct napi_struct *napi, int budget)
 	greth = container_of(napi, struct greth_private, napi);
 
 restart_txrx_poll:
-	if (netif_queue_stopped(greth->netdev)) {
-		if (greth->gbit_mac)
-			greth_clean_tx_gbit(greth->netdev);
-		else
-			greth_clean_tx(greth->netdev);
-	}
-
 	if (greth->gbit_mac) {
+		greth_clean_tx_gbit(greth->netdev);
 		work_done += greth_rx_gbit(greth->netdev, budget - work_done);
 	} else {
+		if (netif_queue_stopped(greth->netdev))
+			greth_clean_tx(greth->netdev);
 		work_done += greth_rx(greth->netdev, budget - work_done);
 	}
 
@@ -983,7 +992,8 @@ restart_txrx_poll:
 	spin_lock_irqsave(&greth->devlock, flags);
 
 	ctrl = GRETH_REGLOAD(greth->regs->control);
-	if (netif_queue_stopped(greth->netdev)) {
+	if ((greth->gbit_mac && (greth->tx_last != greth->tx_next)) ||
+	    (!greth->gbit_mac && netif_queue_stopped(greth->netdev))) {
 		GRETH_REGSAVE(greth->regs->control,
 			      ctrl | GRETH_TXI | GRETH_RXI);
 		mask = GRETH_INT_RX | GRETH_INT_RE |