@@ -1636,6 +1636,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
 		q_idx = q_idx % cpsw->tx_ch_num;
 
 	txch = cpsw->txv[q_idx].ch;
+	txq = netdev_get_tx_queue(ndev, q_idx);
 	ret = cpsw_tx_packet_submit(priv, skb, txch);
 	if (unlikely(ret != 0)) {
 		cpsw_err(priv, tx_err, "desc submit failed\n");
@@ -1646,15 +1647,26 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
 	 * tell the kernel to stop sending us tx frames.
 	 */
 	if (unlikely(!cpdma_check_free_tx_desc(txch))) {
-		txq = netdev_get_tx_queue(ndev, q_idx);
 		netif_tx_stop_queue(txq);
+
+		/* Barrier, so that stop_queue visible to other cpus */
+		smp_mb__after_atomic();
+
+		if (cpdma_check_free_tx_desc(txch))
+			netif_tx_wake_queue(txq);
 	}
 
 	return NETDEV_TX_OK;
 fail:
 	ndev->stats.tx_dropped++;
-	txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
 	netif_tx_stop_queue(txq);
+
+	/* Barrier, so that stop_queue visible to other cpus */
+	smp_mb__after_atomic();
+
+	if (cpdma_check_free_tx_desc(txch))
+		netif_tx_wake_queue(txq);
+
 	return NETDEV_TX_BUSY;
 }
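The change hoists the netdev_get_tx_queue() lookup ahead of the submit so txq
is valid on both return paths, and closes a race: if the tx completion handler
frees descriptors between the cpdma_check_free_tx_desc() check and
netif_tx_stop_queue(), the wake issued by the completion side can be missed
and the queue stalls forever. Below is a minimal sketch of the
stop/barrier/re-check pattern, as an illustration rather than the driver's
actual code: hw_free_desc_count() and hw_reclaim_descs() are hypothetical
stand-ins for the driver's descriptor accounting (cpdma_check_free_tx_desc()
in cpsw).

	/* xmit path, runs on one CPU */
	static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *ndev)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(ndev, 0);

		/* ... post skb to the hardware ring ... */

		if (unlikely(!hw_free_desc_count())) {
			netif_tx_stop_queue(txq);

			/* Full barrier after the atomic set_bit() in
			 * netif_tx_stop_queue(): publish the stopped state
			 * before re-reading the descriptor count.
			 */
			smp_mb__after_atomic();

			/* The completion path may have reclaimed descriptors
			 * in the window before stop_queue(); re-check and
			 * undo the stop so the queue cannot stall permanently.
			 */
			if (hw_free_desc_count())
				netif_tx_wake_queue(txq);
		}

		return NETDEV_TX_OK;
	}

	/* completion path, may run concurrently on another CPU */
	static void example_tx_complete(struct net_device *ndev)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(ndev, 0);

		hw_reclaim_descs();	/* make descriptors free again */
		smp_mb();		/* order reclaim before the stopped-state read */
		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
	}

With the barriers paired this way, either the xmit path sees the freed
descriptors on its re-check, or the completion path sees the stopped bit and
wakes the queue; the case where both sides miss is ruled out.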