
stmmac: tune rx copy via threshold.

A threshold is now also used to limit skb allocation when zero-copy
is in use. This avoids leaving the ring in an inconsistent state when
skb allocation fails under very aggressive testing or in low-memory
conditions.
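For context, the counter works as a small state machine: each zero-copy
receive leaves a hole in the RX ring and bumps rx_zeroc_thresh, each
successful refill plugs a hole and decrements it, and a failed refill
jumps it straight to STMMAC_RX_THRESH, forcing the copy path until enough
refills succeed. Below is a minimal userspace C sketch of that logic;
the sim_priv structure, the rx_* helper names, the harness, and the
assumed DMA_RX_SIZE value are hypothetical, while the constants' shape
and the counter transitions mirror the patch.

/* Userspace sketch of the refill-threshold mechanism; not kernel code. */
#include <stdio.h>
#include <stdbool.h>

#define DMA_RX_SIZE       512			/* assumed ring size */
#define STMMAC_RX_THRESH  (DMA_RX_SIZE / 4)

struct sim_priv {
	unsigned int rx_zeroc_thresh;	/* RX ring holes awaiting refill */
};

/* Mirrors stmmac_rx_threshold_count(): once the number of missing ring
 * buffers reaches STMMAC_RX_THRESH, the copy path is forced. */
static int rx_threshold_count(struct sim_priv *priv)
{
	return priv->rx_zeroc_thresh >= STMMAC_RX_THRESH;
}

/* Zero-copy receive: the ring skb is handed up the stack, leaving a hole. */
static void rx_zero_copy(struct sim_priv *priv)
{
	priv->rx_zeroc_thresh++;
}

/* Refill: on success a hole is plugged; on allocation failure the patch
 * jumps the counter to the threshold, disabling zero-copy for a while. */
static void rx_refill(struct sim_priv *priv, bool alloc_ok)
{
	if (!alloc_ok) {
		priv->rx_zeroc_thresh = STMMAC_RX_THRESH;
		return;
	}
	if (priv->rx_zeroc_thresh > 0)
		priv->rx_zeroc_thresh--;
}

int main(void)
{
	struct sim_priv priv = { 0 };

	rx_zero_copy(&priv);		/* one buffer consumed */
	rx_refill(&priv, false);	/* allocation fails under pressure */
	printf("copy forced: %d\n", rx_threshold_count(&priv));	/* 1 */

	for (int i = 0; i < STMMAC_RX_THRESH; i++)
		rx_refill(&priv, true);	/* memory recovers, holes plugged */
	printf("copy forced: %d\n", rx_threshold_count(&priv));	/* 0 */
	return 0;
}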

Signed-off-by: Giuseppe Cavallaro <peppe.cavallaro@st.com>
Signed-off-by: Alexandre TORGUE <alexandre.torgue@st.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 120e87f91e

drivers/net/ethernet/stmicro/stmmac/stmmac.h (+1, -0)

@@ -75,6 +75,7 @@ struct stmmac_priv {
 	unsigned int dirty_rx;
 	unsigned int dma_buf_sz;
 	unsigned int rx_copybreak;
+	unsigned int rx_zeroc_thresh;
 	u32 rx_riwt;
 	int hwts_rx_en;
 	dma_addr_t *rx_skbuff_dma;
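(As the stmmac_main.c hunks below suggest, the new rx_zeroc_thresh field
appears to count RX ring entries whose skb has been passed up the stack
but not yet replaced by a refill.)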

drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (+24, -2)

@@ -72,6 +72,7 @@ module_param(phyaddr, int, S_IRUGO);
 MODULE_PARM_DESC(phyaddr, "Physical device address");
 
 #define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
+#define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
 
 static int flow_ctrl = FLOW_OFF;
 module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
@@ -2138,6 +2139,14 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
 }
 
 
+static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv)
+{
+	if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH)
+		return 0;
+
+	return 1;
+}
+
 /**
  * stmmac_rx_refill - refill used skb preallocated buffers
  * @priv: driver private structure
@@ -2162,8 +2171,15 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
 			struct sk_buff *skb;
 
 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
-			if (unlikely(!skb))
+			if (unlikely(!skb)) {
+				/* so for a while no zero-copy! */
+				priv->rx_zeroc_thresh = STMMAC_RX_THRESH;
+				if (unlikely(net_ratelimit()))
+					dev_err(priv->device,
+						"fail to alloc skb entry %d\n",
+						entry);
 				break;
+			}
 
 			priv->rx_skbuff[entry] = skb;
 			priv->rx_skbuff_dma[entry] =
@@ -2179,9 +2195,13 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
 
 			priv->hw->mode->refill_desc3(priv, p);
 
+			if (priv->rx_zeroc_thresh > 0)
+				priv->rx_zeroc_thresh--;
+
 			if (netif_msg_rx_status(priv))
 				pr_debug("\trefill entry #%d\n", entry);
 		}
+
 		wmb();
 		priv->hw->desc->set_rx_owner(p);
 		wmb();
@@ -2285,7 +2305,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 						 frame_len, status);
 			}
 
-			if (unlikely(frame_len < priv->rx_copybreak)) {
+			if (unlikely((frame_len < priv->rx_copybreak) ||
+				     stmmac_rx_threshold_count(priv))) {
 				skb = netdev_alloc_skb_ip_align(priv->dev,
 								frame_len);
 				if (unlikely(!skb)) {
@@ -2320,6 +2341,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 				}
 				prefetch(skb->data - NET_IP_ALIGN);
 				priv->rx_skbuff[entry] = NULL;
+				priv->rx_zeroc_thresh++;
 
 				skb_put(skb, frame_len);
 				dma_unmap_single(priv->device,
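Taken together, and as far as these hunks show: frames shorter than
rx_copybreak were already copied into a freshly allocated skb, and the
patch takes that same copy path whenever the threshold is active, so the
preallocated ring buffer stays in place and the ring is not left with
unfillable holes while memory is tight. Once refills succeed again,
rx_zeroc_thresh drains back below STMMAC_RX_THRESH and zero-copy resumes.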