@@ -406,7 +406,7 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 	dev_kfree_skb_any(tx_req->skb);
 
 	++priv->tx_tail;
-	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
+	if (unlikely((priv->tx_head - priv->tx_tail) == ipoib_sendq_size >> 1) &&
 	    netif_queue_stopped(dev) &&
 	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
 		netif_wake_queue(dev);
@@ -611,8 +611,8 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
 		priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM;
 	else
 		priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
-
-	if (++priv->tx_outstanding == ipoib_sendq_size) {
+	/* increase the tx_head after send success, but use it for queue state */
+	if (priv->tx_head - priv->tx_tail == ipoib_sendq_size - 1) {
 		ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
 		if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
 			ipoib_warn(priv, "request notify on send CQ failed\n");
@@ -627,7 +627,6 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
 	if (unlikely(rc)) {
 		ipoib_warn(priv, "post_send failed, error %d\n", rc);
 		++dev->stats.tx_errors;
-		--priv->tx_outstanding;
 		ipoib_dma_unmap_tx(priv, tx_req);
 		dev_kfree_skb_any(skb);
 		if (netif_queue_stopped(dev))
@@ -640,7 +639,7 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
 		++priv->tx_head;
 	}
 
-	if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
+	if (unlikely(priv->tx_head - priv->tx_tail > MAX_SEND_CQE))
 		while (poll_tx(priv))
 			; /* nothing */
 
@@ -773,7 +772,6 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
 				ipoib_dma_unmap_tx(priv, tx_req);
 				dev_kfree_skb_any(tx_req->skb);
 				++priv->tx_tail;
-				--priv->tx_outstanding;
 			}
 
 			for (i = 0; i < ipoib_recvq_size; ++i) {
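
Why the counter can go away: priv->tx_head and priv->tx_tail are free-running
unsigned counters (head advances when a send is posted, tail when it
completes), so the number of posted-but-uncompleted sends is always
priv->tx_head - priv->tx_tail, even after the counters wrap. The separate
tx_outstanding variable was therefore redundant state that had to be kept in
sync on every post, completion, and error path. Note also that the new full-ring
test runs before ++priv->tx_head, which is why it compares against
ipoib_sendq_size - 1 where the old code tested == ipoib_sendq_size after its
own increment. Below is a minimal standalone sketch of that invariant; the
names (tx_ring, tx_post, tx_complete, SENDQ_SIZE) are hypothetical and not
part of the driver, it only mirrors the head/tail arithmetic the patch relies on.

/*
 * Standalone sketch (hypothetical names, not driver code) of the invariant
 * the patch uses: with free-running unsigned producer/consumer counters,
 * the number of outstanding entries is always head - tail, even after the
 * counters wrap, so no separate "outstanding" counter is needed.
 */
#include <assert.h>
#include <stdio.h>

#define SENDQ_SIZE 128u	/* power of two, like ipoib_sendq_size */

struct tx_ring {
	unsigned int head;	/* advanced by the producer on post */
	unsigned int tail;	/* advanced by the consumer on completion */
};

static unsigned int tx_outstanding(const struct tx_ring *r)
{
	/* Unsigned subtraction is wraparound-safe. */
	return r->head - r->tail;
}

/* Returns 1 if the ring is full after this post (stop the net queue). */
static int tx_post(struct tx_ring *r)
{
	/*
	 * Tested before the increment, hence SENDQ_SIZE - 1, exactly as
	 * the patch tests before ++priv->tx_head; the driver still posts
	 * this request but stops the kernel net queue.
	 */
	int stop_queue = (tx_outstanding(r) == SENDQ_SIZE - 1);

	++r->head;
	return stop_queue;
}

static void tx_complete(struct tx_ring *r)
{
	assert(tx_outstanding(r) > 0);
	++r->tail;
}

int main(void)
{
	/* Start at UINT_MAX to show the arithmetic survives wraparound. */
	struct tx_ring r = { .head = 0xffffffffu, .tail = 0xffffffffu };

	tx_post(&r);	/* head wraps from 0xffffffff to 0 */
	tx_post(&r);
	printf("outstanding = %u\n", tx_outstanding(&r));	/* prints 2 */
	tx_complete(&r);
	printf("outstanding = %u\n", tx_outstanding(&r));	/* prints 1 */
	return 0;
}

This is the same trick used by many single-producer/single-consumer rings:
it only requires that the counters be unsigned and that the ring hold fewer
than UINT_MAX entries, both of which hold for the IPoIB send queue.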