@@ -103,8 +103,7 @@ void i40evf_clean_tx_ring(struct i40e_ring *tx_ring)
 		return;
 
 	/* cleanup Tx queue statistics */
-	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
-						  tx_ring->queue_index));
+	netdev_tx_reset_queue(txring_txq(tx_ring));
 }
 
 /**
@@ -273,8 +272,8 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
 		tx_ring->arm_wb = true;
 	}
 
-	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
-						       tx_ring->queue_index),
+	/* notify netdev of completed buffers */
+	netdev_tx_completed_queue(txring_txq(tx_ring),
 				  total_packets, total_bytes);
 
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
@@ -2012,9 +2011,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
 	tx_ring->next_to_use = i;
 
-	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
-						 tx_ring->queue_index),
-			     first->bytecount);
+	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
 	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
 	/* Algorithm to optimize tail and RS bit setting:
@@ -2039,13 +2036,11 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	 * trigger a force WB.
 	 */
 	if (skb->xmit_more &&
-	    !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
-						    tx_ring->queue_index))) {
+	    !netif_xmit_stopped(txring_txq(tx_ring))) {
 		tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
 		tail_bump = false;
 	} else if (!skb->xmit_more &&
-		   !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
-							   tx_ring->queue_index)) &&
+		   !netif_xmit_stopped(txring_txq(tx_ring)) &&
 		   (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
 		   (tx_ring->packet_stride < WB_STRIDE) &&
 		   (desc_count < WB_STRIDE)) {
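
Note: every hunk above relies on txring_txq() to map an i40e_ring back to its netdev_queue, but the helper itself is not part of this diff. A minimal sketch of what such a wrapper presumably looks like, assuming it lives in the driver's i40e_txrx.h and simply folds the old two-argument netdev_get_tx_queue() lookup into one call:

/* txring_txq - find the netdev Tx queue backing an i40e Tx ring
 * (sketch only; the exact location and kernel-doc in the driver tree may differ)
 */
static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}

With a wrapper like this, each call site takes a single argument, which is what lets the converted netdev_tx_*_queue() and netif_xmit_stopped() calls above fit on one line.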