@@ -364,6 +364,7 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
 	RING_IDX cons, prod;
 	unsigned short id;
 	struct sk_buff *skb;
+	bool more_to_do;
 
 	BUG_ON(!netif_carrier_ok(queue->info->netdev));
 
@@ -398,18 +399,8 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
 
 		queue->tx.rsp_cons = prod;
 
-		/*
-		 * Set a new event, then check for race with update of tx_cons.
-		 * Note that it is essential to schedule a callback, no matter
-		 * how few buffers are pending. Even if there is space in the
-		 * transmit ring, higher layers may be blocked because too much
-		 * data is outstanding: in such cases notification from Xen is
-		 * likely to be the only kick that we'll get.
-		 */
-		queue->tx.sring->rsp_event =
-			prod + ((queue->tx.sring->req_prod - prod) >> 1) + 1;
-		mb();		/* update shared area */
-	} while ((cons == prod) && (prod != queue->tx.sring->rsp_prod));
+		RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
+	} while (more_to_do);
 
 	xennet_maybe_wake_tx(queue);
 }
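
For reference (not part of the patch itself): RING_FINAL_CHECK_FOR_RESPONSES
comes from include/xen/interface/io/ring.h and, in the ring.h versions I am
aware of, expands roughly as follows (whitespace and barrier flavour may
differ between trees):

	#define RING_HAS_UNCONSUMED_RESPONSES(_r)			\
		((_r)->sring->rsp_prod - (_r)->rsp_cons)

	#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do {	\
		(_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);	\
		if (_work_to_do) break;					\
		(_r)->sring->rsp_event = (_r)->rsp_cons + 1;		\
		mb();							\
		(_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);	\
	} while (0)

So the macro performs the same "arm rsp_event, barrier, re-check for a racing
response" sequence as the open-coded block being removed. One behavioural
difference is worth noting: the old code armed rsp_event roughly halfway
through the currently outstanding requests, whereas the macro requests an
event for the very next response (rsp_cons + 1).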