@@ -1355,6 +1355,7 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
 
 		tx_q->dirty_tx = 0;
 		tx_q->cur_tx = 0;
+		tx_q->mss = 0;
 
 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
 	}
@@ -1946,6 +1947,7 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
 					     (i == DMA_TX_SIZE - 1));
 	tx_q->dirty_tx = 0;
 	tx_q->cur_tx = 0;
+	tx_q->mss = 0;
 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
 	stmmac_start_tx_dma(priv, chan);
 
@@ -2632,7 +2634,6 @@ static int stmmac_open(struct net_device *dev)
 
 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
-	priv->mss = 0;
 
 	ret = alloc_dma_desc_resources(priv);
 	if (ret < 0) {
@@ -2872,10 +2873,10 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 	mss = skb_shinfo(skb)->gso_size;
 
 	/* set new MSS value if needed */
-	if (mss != priv->mss) {
+	if (mss != tx_q->mss) {
 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
 		priv->hw->desc->set_mss(mss_desc, mss);
-		priv->mss = mss;
+		tx_q->mss = mss;
 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
 	}
 
@@ -4436,6 +4437,7 @@ static void stmmac_reset_queues_param(struct stmmac_priv *priv)
 
 		tx_q->cur_tx = 0;
 		tx_q->dirty_tx = 0;
+		tx_q->mss = 0;
 	}
 }
 
@@ -4481,11 +4483,6 @@ int stmmac_resume(struct device *dev)
 
 	stmmac_reset_queues_param(priv);
 
-	/* reset private mss value to force mss context settings at
-	 * next tso xmit (only used for gmac4).
-	 */
-	priv->mss = 0;
-
 	stmmac_clear_descriptors(priv);
 
 	stmmac_hw_setup(ndev, false);
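
Note: the hunks above assume a new per-queue mss member on struct stmmac_tx_queue (and the removal of the old one from struct stmmac_priv); that companion change to drivers/net/ethernet/stmicro/stmmac/stmmac.h is not part of this excerpt. A minimal sketch of its shape, kept to the members this diff itself touches; everything else is omitted:

#include <linux/types.h>

/* Sketch only: the real struct in stmmac.h carries many more members. */
struct stmmac_tx_queue {
	u32 cur_tx;	/* index of the next TX descriptor to use */
	u32 dirty_tx;	/* index of the next TX descriptor to reclaim */
	u32 mss;	/* MSS last programmed into this queue's TSO context descriptor */
};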