@@ -1844,6 +1844,11 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
 		if (unlikely(status & tx_dma_own))
 			break;
 
+		/* Make sure descriptor fields are read after reading
+		 * the own bit.
+		 */
+		dma_rmb();
+
 		/* Just consider the last segment and ...*/
 		if (likely(!(status & tx_not_ls))) {
 			/* ... verify the status error condition */
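
The hunk above is the consumer side of the descriptor handoff: the CPU may only read a descriptor's status fields after it has observed that the DMA engine released the own bit. Below is a minimal sketch of that pattern, assuming an illustrative descriptor layout; struct desc, poll_tx_desc() and the BIT(31) own bit are stand-ins, not the driver's real struct dma_desc or tx_status callback.

/* Consumer side: the own-bit load must be ordered before data loads.
 * All names here are illustrative.
 */
struct desc {
	__le32 des0;	/* status word, written back by the device */
	__le32 des3;	/* own bit in the top bit */
};

static int poll_tx_desc(struct desc *p, u32 *status)
{
	/* Read the own bit first; if still set, the device owns it */
	if (le32_to_cpu(READ_ONCE(p->des3)) & BIT(31))
		return -EBUSY;

	/* Order the own-bit load before the loads below; without
	 * dma_rmb() the CPU may read des0 speculatively and see
	 * stale data from before the device's write-back.
	 */
	dma_rmb();

	*status = le32_to_cpu(p->des0);
	return 0;
}
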
@@ -2983,14 +2988,21 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
 
 	/* If context desc is used to change MSS */
-	if (mss_desc)
+	if (mss_desc) {
+		/* Make sure that first descriptor has been completely
+		 * written, including its own bit. This is because MSS is
+		 * actually before first descriptor, so we need to make
+		 * sure that MSS's own bit is the last thing written.
+		 */
+		dma_wmb();
 		priv->hw->desc->set_tx_owner(mss_desc);
+	}
 
 	/* The own bit must be the latest setting done when prepare the
 	 * descriptor and then barrier is needed to make sure that
 	 * all is coherent before granting the DMA engine.
 	 */
-	dma_wmb();
+	wmb();
 
 	if (netif_msg_pktdata(priv)) {
 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
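
On the producer side the order inverts, and the MSS context descriptor adds a twist: it sits in front of the first data descriptor in the ring, so the device parses forward from it as soon as its own bit is set. The hunk therefore releases the first data descriptor, then uses dma_wmb() so that the context descriptor's own bit is the last coherent-memory store the device can observe. A sketch under the same caveat, reusing the illustrative struct desc above; set_own() stands in for priv->hw->desc->set_tx_owner().

static void set_own(struct desc *p)
{
	p->des3 = cpu_to_le32(le32_to_cpu(p->des3) | BIT(31));
}

static void release_tso_descs(struct desc *mss_desc, struct desc *first)
{
	/* Hand the first data descriptor to the hardware ... */
	set_own(first);

	if (mss_desc) {
		/* ... and only then the context descriptor. dma_wmb()
		 * keeps every earlier descriptor store, including
		 * first's own bit, visible to the device before this one.
		 */
		dma_wmb();
		set_own(mss_desc);
	}
}
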
@@ -3214,7 +3226,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 		 * descriptor and then barrier is needed to make sure that
 		 * all is coherent before granting the DMA engine.
 		 */
-		dma_wmb();
+		wmb();
 	}
 
 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
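
The last hunk, like the matching change in stmmac_tso_xmit() above, swaps dma_wmb() for wmb(). The distinction the patch relies on: dma_wmb() only orders writes to coherent memory against each other, while the store that actually starts the engine is an MMIO register write, so the stronger wmb() is used to keep the descriptor writes ordered before the doorbell. A final sketch; the doorbell pointer and the value written are placeholders, not the driver's DMA channel registers.

static void kick_tx_dma(struct desc *first, void __iomem *doorbell)
{
	set_own(first);	/* last coherent-memory write */

	/* Order the coherent-memory descriptor stores before the MMIO
	 * write below; per the patch's rationale, dma_wmb() does not
	 * guarantee ordering against MMIO.
	 */
	wmb();

	writel(1, doorbell);	/* tell the device to fetch descriptors */
}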