Browse Source

net: stmmac: add BQL support

Add support for Byte Queue Limits to the STMicro MAC driver.

Tested on an Amlogic S802 quad Cortex-A9 board, where the use of BQL
decreases the latency of a high-priority ping from ~12ms to ~1ms when
the 100Mbit link is saturated by 20 TCP streams.

Signed-off-by: Beniamino Galvani <b.galvani@gmail.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Acked-by: Giuseppe Cavallaro <peppe.cavallaro@st.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Beniamino Galvani 11 years ago
parent
commit
3897957494
1 changed file with 9 additions and 0 deletions
  1. 9 0
      drivers/net/ethernet/stmicro/stmmac/stmmac_main.c

+ 9 - 0
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c

@@ -1097,6 +1097,7 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
 
 	priv->dirty_tx = 0;
 	priv->cur_tx = 0;
+	netdev_reset_queue(priv->dev);
 
 	stmmac_clear_descriptors(priv);
 
@@ -1300,6 +1301,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
 static void stmmac_tx_clean(struct stmmac_priv *priv)
 {
 	unsigned int txsize = priv->dma_tx_size;
+	unsigned int bytes_compl = 0, pkts_compl = 0;
 
 	spin_lock(&priv->tx_lock);
 
@@ -1356,6 +1358,8 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
 		priv->hw->mode->clean_desc3(priv, p);
 
 		if (likely(skb != NULL)) {
+			pkts_compl++;
+			bytes_compl += skb->len;
 			dev_consume_skb_any(skb);
 			priv->tx_skbuff[entry] = NULL;
 		}
@@ -1364,6 +1368,9 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
 
 		priv->dirty_tx++;
 	}
+
+	netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
+
 	if (unlikely(netif_queue_stopped(priv->dev) &&
 		     stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
 		netif_tx_lock(priv->dev);
@@ -1418,6 +1425,7 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
 						     (i == txsize - 1));
 	priv->dirty_tx = 0;
 	priv->cur_tx = 0;
+	netdev_reset_queue(priv->dev);
 	priv->hw->dma->start_tx(priv->ioaddr);
 
 	priv->dev->stats.tx_errors++;
@@ -2050,6 +2058,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
	if (!priv->hwts_tx_en)
 		skb_tx_timestamp(skb);
 
+	netdev_sent_queue(dev, skb->len);
	priv->hw->dma->enable_dma_transmission(priv->ioaddr);
 
 	spin_unlock(&priv->tx_lock);