@@ -132,15 +132,6 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
 	return max_descs;
 }
 
-/* Get partner of a TX queue, seen as part of the same net core queue */
-static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
-{
-	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
-		return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
-	else
-		return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
-}
-
 static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
 {
 	/* We need to consider both queues that the net core sees as one */
@@ -344,6 +335,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	struct efx_nic *efx = tx_queue->efx;
 	struct device *dma_dev = &efx->pci_dev->dev;
 	struct efx_tx_buffer *buffer;
+	unsigned int old_insert_count = tx_queue->insert_count;
 	skb_frag_t *fragment;
 	unsigned int len, unmap_len = 0;
 	dma_addr_t dma_addr, unmap_addr = 0;
@@ -351,7 +343,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	unsigned short dma_flags;
 	int i = 0;
 
-	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
+	EFX_BUG_ON_PARANOID(tx_queue->write_count > tx_queue->insert_count);
 
 	if (skb_shinfo(skb)->gso_size)
 		return efx_enqueue_skb_tso(tx_queue, skb);
@@ -369,9 +361,8 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 
 	/* Consider using PIO for short packets */
 #ifdef EFX_USE_PIO
-	if (skb->len <= efx_piobuf_size && tx_queue->piobuf &&
-	    efx_nic_tx_is_empty(tx_queue) &&
-	    efx_nic_tx_is_empty(efx_tx_queue_partner(tx_queue))) {
+	if (skb->len <= efx_piobuf_size && !skb->xmit_more &&
+	    efx_nic_may_tx_pio(tx_queue)) {
 		buffer = efx_enqueue_skb_pio(tx_queue, skb);
 		dma_flags = EFX_TX_BUF_OPTION;
 		goto finish_packet;
@@ -439,13 +430,14 @@ finish_packet:
 
 	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
 
+	efx_tx_maybe_stop_queue(tx_queue);
+
 	/* Pass off to hardware */
-	efx_nic_push_buffers(tx_queue);
+	if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq))
+		efx_nic_push_buffers(tx_queue);
 
 	tx_queue->tx_packets++;
 
-	efx_tx_maybe_stop_queue(tx_queue);
-
 	return NETDEV_TX_OK;
 
  dma_err:
@@ -458,7 +450,7 @@ finish_packet:
 	dev_kfree_skb_any(skb);
 
 	/* Work backwards until we hit the original insert pointer value */
-	while (tx_queue->insert_count != tx_queue->write_count) {
+	while (tx_queue->insert_count != old_insert_count) {
 		unsigned int pkts_compl = 0, bytes_compl = 0;
 		--tx_queue->insert_count;
 		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
@@ -989,12 +981,13 @@ static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
 /* Remove buffers put into a tx_queue. None of the buffers must have
  * an skb attached.
  */
-static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
+static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
+			       unsigned int insert_count)
 {
 	struct efx_tx_buffer *buffer;
 
 	/* Work backwards until we hit the original insert pointer value */
-	while (tx_queue->insert_count != tx_queue->write_count) {
+	while (tx_queue->insert_count != insert_count) {
 		--tx_queue->insert_count;
 		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
 		efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
@@ -1258,13 +1251,14 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 			       struct sk_buff *skb)
 {
 	struct efx_nic *efx = tx_queue->efx;
+	unsigned int old_insert_count = tx_queue->insert_count;
 	int frag_i, rc;
 	struct tso_state state;
 
 	/* Find the packet protocol and sanity-check it */
 	state.protocol = efx_tso_check_protocol(skb);
 
-	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
+	EFX_BUG_ON_PARANOID(tx_queue->write_count > tx_queue->insert_count);
 
 	rc = tso_start(&state, efx, skb);
 	if (rc)
@@ -1308,11 +1302,12 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 
 	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
 
-	/* Pass off to hardware */
-	efx_nic_push_buffers(tx_queue);
-
 	efx_tx_maybe_stop_queue(tx_queue);
 
+	/* Pass off to hardware */
+	if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq))
+		efx_nic_push_buffers(tx_queue);
+
 	tx_queue->tso_bursts++;
 	return NETDEV_TX_OK;
 
@@ -1336,6 +1331,6 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 	dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
 			 state.header_unmap_len, DMA_TO_DEVICE);
 
-	efx_enqueue_unwind(tx_queue);
+	efx_enqueue_unwind(tx_queue, old_insert_count);
 	return NETDEV_TX_OK;
 }
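
Note on the pattern above: skb->xmit_more is the net core's hint that another packet will follow immediately, so a driver may keep writing descriptors and defer the doorbell (here, efx_nic_push_buffers()) until the last packet of the burst, provided it still rings the doorbell whenever the queue has just been stopped; otherwise the deferred packets would sit on the ring with no later transmit left to flush them. That is why efx_tx_maybe_stop_queue() moves ahead of the push, so the netif_xmit_stopped() test sees the up-to-date queue state, and why the unwind paths now compare against a saved old_insert_count rather than write_count, since write_count may legitimately lag insert_count between calls once pushes are deferred. The sketch below is illustrative only, not sfc code: struct my_priv, my_queue_descriptors(), my_ring_nearly_full() and my_ring_doorbell() are hypothetical stand-ins, while the netdev helpers are the real interfaces of this kernel era.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical driver internals, declared only to make the sketch hang
 * together; they do not exist in the sfc driver or elsewhere.
 */
struct my_priv;
static void my_queue_descriptors(struct my_priv *priv, struct sk_buff *skb);
static bool my_ring_nearly_full(struct my_priv *priv);
static void my_ring_doorbell(struct my_priv *priv);

/* Illustrative sketch of the xmit_more contract: defer the doorbell while
 * the stack promises more packets, but always flush when the queue has
 * just been stopped.
 */
static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);
	struct netdev_queue *txq =
		netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	my_queue_descriptors(priv, skb);	/* fill the TX ring, no doorbell yet */
	netdev_tx_sent_queue(txq, skb->len);	/* BQL accounting */

	/* Stop the queue first, so the xmit_more test below sees it. */
	if (my_ring_nearly_full(priv))
		netif_tx_stop_queue(txq);

	/* Ring the doorbell only when no further packet is promised, or when
	 * the queue was stopped and no later xmit will arrive to flush it.
	 */
	if (!skb->xmit_more || netif_xmit_stopped(txq))
		my_ring_doorbell(priv);

	return NETDEV_TX_OK;
}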