|
@@ -29,6 +29,17 @@
|
|
|
#define DRV_NAME "arc_emac"
|
|
|
#define DRV_VERSION "1.0"
|
|
|
|
|
|
+/**
|
|
|
+ * arc_emac_tx_avail - Return the number of available slots in the tx ring.
|
|
|
+ * @priv: Pointer to ARC EMAC private data structure.
|
|
|
+ *
|
|
|
+ * returns: the number of slots available for transmission in the tx ring.
|
|
|
+ */
|
|
|
+static inline int arc_emac_tx_avail(struct arc_emac_priv *priv)
|
|
|
+{
|
|
|
+ return (priv->txbd_dirty + TX_BD_NUM - priv->txbd_curr - 1) % TX_BD_NUM;
|
|
|
+}
|
|
|
+
|
|
|
/**
|
|
|
* arc_emac_adjust_link - Adjust the PHY link duplex.
|
|
|
* @ndev: Pointer to the net_device structure.
|
|
@@ -180,10 +191,15 @@ static void arc_emac_tx_clean(struct net_device *ndev)
|
|
|
txbd->info = 0;
|
|
|
|
|
|
*txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
|
|
|
-
|
|
|
- if (netif_queue_stopped(ndev))
|
|
|
- netif_wake_queue(ndev);
|
|
|
}
|
|
|
+
|
|
|
+ /* Ensure that txbd_dirty is visible to tx() before checking
|
|
|
+ * for queue stopped.
|
|
|
+ */
|
|
|
+ smp_mb();
|
|
|
+
|
|
|
+ if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv))
|
|
|
+ netif_wake_queue(ndev);
|
|
|
}
|
|
|
|
|
|
/**
|
|
@@ -574,11 +590,9 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
|
|
|
|
|
|
len = max_t(unsigned int, ETH_ZLEN, skb->len);
|
|
|
|
|
|
- /* EMAC still holds this buffer in its possession.
|
|
|
- * CPU must not modify this buffer descriptor
|
|
|
- */
|
|
|
- if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) {
|
|
|
+ if (unlikely(!arc_emac_tx_avail(priv))) {
|
|
|
netif_stop_queue(ndev);
|
|
|
+ netdev_err(ndev, "BUG! Tx Ring full when queue awake!\n");
|
|
|
return NETDEV_TX_BUSY;
|
|
|
}
|
|
|
|
|
@@ -607,12 +621,19 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
|
|
|
/* Increment index to point to the next BD */
|
|
|
*txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;
|
|
|
|
|
|
- /* Get "info" of the next BD */
|
|
|
- info = &priv->txbd[*txbd_curr].info;
|
|
|
+ /* Ensure that tx_clean() sees the new txbd_curr before
|
|
|
+ * checking the queue status. This prevents an unneeded wake
|
|
|
+ * of the queue in tx_clean().
|
|
|
+ */
|
|
|
+ smp_mb();
|
|
|
|
|
|
- /* Check if if Tx BD ring is full - next BD is still owned by EMAC */
|
|
|
- if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC))
|
|
|
+ if (!arc_emac_tx_avail(priv)) {
|
|
|
netif_stop_queue(ndev);
|
|
|
+ /* Refresh txbd_dirty */
|
|
|
+ smp_mb();
|
|
|
+ if (arc_emac_tx_avail(priv))
|
|
|
+ netif_start_queue(ndev);
|
|
|
+ }
|
|
|
|
|
|
arc_reg_set(priv, R_STATUS, TXPL_MASK);
|
|
|
|