@@ -125,6 +125,20 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
 	}
 }
 
+/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring: tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+	return le32_to_cpu(*(volatile __le32 *)head);
+}
+
 /**
  * i40e_get_tx_pending - how many tx descriptors not processed
  * @tx_ring: the ring of descriptors
@@ -134,10 +148,16 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
  **/
 static u32 i40e_get_tx_pending(struct i40e_ring *ring)
 {
-	u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
-		   ? ring->next_to_use
-		   : ring->next_to_use + ring->count);
-	return ntu - ring->next_to_clean;
+	u32 head, tail;
+
+	head = i40e_get_head(ring);
+	tail = readl(ring->tail);
+
+	if (head != tail)
+		return (head < tail) ?
+			tail - head : (tail + ring->count - head);
+
+	return 0;
 }
 
 /**
@@ -146,6 +166,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
  **/
 static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
 {
+	u32 tx_done = tx_ring->stats.packets;
+	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
 	u32 tx_pending = i40e_get_tx_pending(tx_ring);
 	bool ret = false;
 
@@ -162,36 +184,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
 	 * run the check_tx_hang logic with a transmit completion
 	 * pending but without time to complete it yet.
 	 */
-	if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
-	    (tx_pending >= I40E_MIN_DESC_PENDING)) {
+	if ((tx_done_old == tx_done) && tx_pending) {
 		/* make sure it is true for two checks in a row */
 		ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
 				       &tx_ring->state);
-	} else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) ||
-		   !(tx_pending < I40E_MIN_DESC_PENDING) ||
-		   !(tx_pending > 0)) {
+	} else if (tx_done_old == tx_done &&
+		   (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
 		/* update completed stats and disarm the hang check */
-		tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+		tx_ring->tx_stats.tx_done_old = tx_done;
 		clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
 	}
 
 	return ret;
 }
 
-/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring: tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
-	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
-	return le32_to_cpu(*(volatile __le32 *)head);
-}
-
 #define WB_STRIDE 0x3
 
 /**
@@ -1206,17 +1212,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	if (err < 0)
 		return err;
 
-	if (protocol == htons(ETH_P_IP)) {
-		iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+	iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+	ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+
+	if (iph->version == 4) {
 		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
 		iph->tot_len = 0;
 		iph->check = 0;
 		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
 						 0, IPPROTO_TCP, 0);
-	} else if (skb_is_gso_v6(skb)) {
-
-		ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
-					   : ipv6_hdr(skb);
+	} else if (ipv6h->version == 6) {
 		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
 		ipv6h->payload_len = 0;
 		tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
@@ -1274,13 +1279,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
 					I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
 			}
 		} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
-			if (tx_flags & I40E_TX_FLAGS_TSO) {
-				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+			if (tx_flags & I40E_TX_FLAGS_TSO)
 				ip_hdr(skb)->check = 0;
-			} else {
-				*cd_tunneling |=
-					I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
-			}
 		}
 
 		/* Now set the ctx descriptor fields */
@@ -1290,6 +1291,11 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
 				   ((skb_inner_network_offset(skb) -
 				     skb_transport_offset(skb)) >> 1) <<
 				   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+		if (this_ip_hdr->version == 6) {
+			tx_flags &= ~I40E_TX_FLAGS_IPV4;
+			tx_flags |= I40E_TX_FLAGS_IPV6;
+		}
+
 
 	} else {
 		network_hdr_len = skb_network_header_len(skb);
@@ -1380,6 +1386,67 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
 	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
 }
 
+/**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb: send buffer
+ * @tx_flags: collected send information
+ * @hdr_len: size of the packet header
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
+			       const u8 hdr_len)
+{
+	struct skb_frag_struct *frag;
+	bool linearize = false;
+	unsigned int size = 0;
+	u16 num_frags;
+	u16 gso_segs;
+
+	num_frags = skb_shinfo(skb)->nr_frags;
+	gso_segs = skb_shinfo(skb)->gso_segs;
+
+	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
+		u16 j = 1;
+
+		if (num_frags < (I40E_MAX_BUFFER_TXD))
+			goto linearize_chk_done;
+		/* try the simple math, if we have too many frags per segment */
+		if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
+		    I40E_MAX_BUFFER_TXD) {
+			linearize = true;
+			goto linearize_chk_done;
+		}
+		frag = &skb_shinfo(skb)->frags[0];
+		size = hdr_len;
+		/* we might still have more fragments per segment */
+		do {
+			size += skb_frag_size(frag);
+			frag++; j++;
+			if (j == I40E_MAX_BUFFER_TXD) {
+				if (size < skb_shinfo(skb)->gso_size) {
+					linearize = true;
+					break;
+				}
+				j = 1;
+				size -= skb_shinfo(skb)->gso_size;
+				if (size)
+					j++;
+				size += hdr_len;
+			}
+			num_frags--;
+		} while (num_frags);
+	} else {
+		if (num_frags >= I40E_MAX_BUFFER_TXD)
+			linearize = true;
+	}
+
+linearize_chk_done:
+	return linearize;
+}
+
 /**
  * i40e_tx_map - Build the Tx descriptor
  * @tx_ring: ring to send buffer on
@@ -1654,6 +1721,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	else if (tso)
 		tx_flags |= I40E_TX_FLAGS_TSO;
 
+	if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+		if (skb_linearize(skb))
+			goto out_drop;
+
 	skb_tx_timestamp(skb);
 
 	/* always enable CRC insertion offload */