@@ -1858,7 +1858,7 @@ linearize_chk_done:
  *
  * Returns -EBUSY if a stop is needed, else 0
  **/
-static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 {
 	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 	/* Memory barrier before checking head and tail */
@@ -1874,20 +1874,6 @@ static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 	return 0;
 }
 
-/**
- * i40evf_maybe_stop_tx - 1st level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size:    the size buffer we want to assure is available
- *
- * Returns 0 if stop is not needed
- **/
-static inline int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
-	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
-		return 0;
-	return __i40evf_maybe_stop_tx(tx_ring, size);
-}
-
 /**
  * i40evf_tx_map - Build the Tx descriptor
  * @tx_ring:  ring to send buffer on
@@ -2003,7 +1989,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
 						 tx_ring->queue_index),
 			     first->bytecount);
-	i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);
+	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
 	/* Algorithm to optimize tail and RS bit setting:
 	 * if xmit_more is supported
@@ -2085,38 +2071,6 @@ dma_error:
 	tx_ring->next_to_use = i;
 }
 
-/**
- * i40evf_xmit_descriptor_count - calculate number of tx descriptors needed
- * @skb:     send buffer
- * @tx_ring: ring to send buffer on
- *
- * Returns number of data descriptors needed for this skb. Returns 0 to indicate
- * there is not enough descriptors available in this ring since we need at least
- * one descriptor.
- **/
-static inline int i40evf_xmit_descriptor_count(struct sk_buff *skb,
-					       struct i40e_ring *tx_ring)
-{
-	unsigned int f;
-	int count = 0;
-
-	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
-	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
-	 *       + 4 desc gap to avoid the cache line where head is,
-	 *       + 1 desc for context descriptor,
-	 * otherwise try next time
-	 */
-	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
-		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-
-	count += TXD_USE_COUNT(skb_headlen(skb));
-	if (i40evf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
-		tx_ring->tx_stats.tx_busy++;
-		return 0;
-	}
-	return count;
-}
-
 /**
  * i40e_xmit_frame_ring - Sends buffer on Tx ring
  * @skb:     send buffer
@@ -2135,13 +2089,23 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	__be16 protocol;
 	u32 td_cmd = 0;
 	u8 hdr_len = 0;
-	int tso;
+	int tso, count;
 
 	/* prefetch the data, we'll need it later */
 	prefetch(skb->data);
 
-	if (0 == i40evf_xmit_descriptor_count(skb, tx_ring))
+	count = i40e_xmit_descriptor_count(skb);
+
+	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
+	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
+	 *       + 4 desc gap to avoid the cache line where head is,
+	 *       + 1 desc for context descriptor,
+	 * otherwise try next time
+	 */
+	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+		tx_ring->tx_stats.tx_busy++;
 		return NETDEV_TX_BUSY;
+	}
 
 	/* prepare the xmit flags */
 	if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
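
The new callers above rely on i40e_xmit_descriptor_count() and i40e_maybe_stop_tx(), which are not defined in any hunk shown here; the patch assumes they are available as static inlines in the driver's i40e_txrx.h header. As a rough sketch only (the exact placement, kernel-doc comments, and whitespace are assumptions, not part of this diff), they would mirror the removed i40evf_* helpers, with the descriptor count no longer taking the ring and no longer folding in the stop/tx_busy handling, which now lives in i40e_xmit_frame_ring():

/* Sketch: count data descriptors for the skb head plus each fragment.
 * Mirrors the body of the removed i40evf_xmit_descriptor_count(), minus
 * the ring argument and the stop check handled by the caller.
 */
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
	unsigned int f;
	int count = TXD_USE_COUNT(skb_headlen(skb));

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	return count;
}

/* Sketch: fast-path ring-space check, same body as the removed
 * i40evf_maybe_stop_tx(), falling back to the now non-static
 * __i40evf_maybe_stop_tx() from the first hunk.
 */
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40evf_maybe_stop_tx(tx_ring, size);
}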