@@ -7730,7 +7730,8 @@ static void ixgbe_service_task(struct work_struct *work)
 
 static int ixgbe_tso(struct ixgbe_ring *tx_ring,
		     struct ixgbe_tx_buffer *first,
-		     u8 *hdr_len)
+		     u8 *hdr_len,
+		     struct ixgbe_ipsec_tx_data *itd)
 {
	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
	struct sk_buff *skb = first->skb;
@@ -7744,6 +7745,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
+	u32 fceof_saidx = 0;
	int err;
 
	if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -7769,13 +7771,15 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
	if (ip.v4->version == 4) {
		unsigned char *csum_start = skb_checksum_start(skb);
		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
+		int len = csum_start - trans_start;
 
		/* IP header will have to cancel out any data that
-		 * is not a part of the outer IP header
+		 * is not a part of the outer IP header, so set to
+		 * a reverse csum if needed, else init check to 0.
		 */
-		ip.v4->check = csum_fold(csum_partial(trans_start,
-						      csum_start - trans_start,
-						      0));
+		ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ?
+				   csum_fold(csum_partial(trans_start,
+							  len, 0)) : 0;
		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
 
		ip.v4->tot_len = 0;
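The seeding of ip.v4->check in the hunk above is easier to see outside the driver. The following standalone sketch uses userspace stand-ins for csum_partial()/csum_fold() (simplified from the kernel versions, but with the same one's-complement semantics) to show the property the code relies on: pre-loading the check field with the folded sum of the bytes between the outer IP header and the checksum start makes those bytes cancel out of any later checksum computed across both.

/* Userspace demo of one's-complement pre-cancellation; not driver code. */
#include <stdint.h>
#include <stdio.h>

/* RFC 1071 style one's-complement sum over a buffer. */
static uint32_t csum_partial(const uint8_t *buf, size_t len, uint32_t sum)
{
	while (len > 1) {
		sum += (uint32_t)buf[0] << 8 | buf[1];
		buf += 2;
		len -= 2;
	}
	if (len)
		sum += (uint32_t)buf[0] << 8;
	return sum;
}

/* Fold carries into 16 bits and complement, like the kernel's csum_fold(). */
static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint8_t tunnel_hdrs[32];	/* stand-in for trans_start..csum_start */
	uint16_t check;
	size_t i;

	for (i = 0; i < sizeof(tunnel_hdrs); i++)
		tunnel_hdrs[i] = (uint8_t)(i * 37 + 5);

	/* Seed the "check" field exactly as the hunk above does. */
	check = csum_fold(csum_partial(tunnel_hdrs, sizeof(tunnel_hdrs), 0));

	/* Checksum the seeded field together with the range: the range's
	 * contribution cancels against the seed.
	 */
	printf("folded result: 0x%04x\n",
	       (unsigned)csum_fold(csum_partial(tunnel_hdrs,
						sizeof(tunnel_hdrs), check)));
	return 0;
}

Running it prints 0x0000: the folded complement absorbs the tunnel-header bytes, which is why the GSO_PARTIAL path wants a "reverse csum" in ip.v4->check rather than a plain zero.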
@@ -7806,12 +7810,15 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
	mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
 
+	fceof_saidx |= itd->sa_idx;
+	type_tucmd |= itd->flags | itd->trailer_len;
+
	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = l4.hdr - ip.hdr;
	vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
-	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
+	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
			  mss_l4len_idx);
 
	return 1;
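For readers who don't have the context-descriptor layout in their head, here is a rough userspace model of the word-packing done just above. The shift values mirror my reading of the IXGBE_ADVTXD_* definitions in ixgbe_type.h (MACLEN at bit 9, L4LEN at bit 8, MSS at bit 16), but both they and the example offsets should be treated as illustrative assumptions, not the hardware definition.

#include <stdint.h>
#include <stdio.h>

/* Assumed stand-ins for the IXGBE_ADVTXD_* shifts in ixgbe_type.h. */
#define ADVTXD_MACLEN_SHIFT	9
#define ADVTXD_L4LEN_SHIFT	8
#define ADVTXD_MSS_SHIFT	16

int main(void)
{
	/* Hypothetical offsets for an untagged IPv4/TCP TSO frame:
	 * 14B Ethernet + 20B IP + 20B TCP, 1448B MSS, SA index 3.
	 */
	uint32_t mac_len = 14, ip_len = 20, hdr_len = 54, l4_offset = 34;
	uint32_t gso_size = 1448, sa_idx = 3, ipsec_flags = 0, trailer_len = 0;

	/* L4 header length and MSS share one descriptor word. */
	uint32_t mss_l4len_idx = (hdr_len - l4_offset) << ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= gso_size << ADVTXD_MSS_SHIFT;

	/* The words this patch starts filling in for IPsec offload. */
	uint32_t fceof_saidx = sa_idx;
	uint32_t type_tucmd = ipsec_flags | trailer_len;

	/* IP header length in the low bits, MAC length shifted above it
	 * (VLAN tag omitted in this sketch).
	 */
	uint32_t vlan_macip_lens = ip_len | (mac_len << ADVTXD_MACLEN_SHIFT);

	printf("vlan_macip_lens=0x%08x fceof_saidx=0x%08x "
	       "type_tucmd=0x%08x mss_l4len_idx=0x%08x\n",
	       vlan_macip_lens, fceof_saidx, type_tucmd, mss_l4len_idx);
	return 0;
}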
@@ -8502,7 +8509,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
	if (skb->sp && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
		goto out_drop;
 #endif
-	tso = ixgbe_tso(tx_ring, first, &hdr_len);
+	tso = ixgbe_tso(tx_ring, first, &hdr_len, &ipsec_tx);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
@@ -9911,9 +9918,15 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
 
	/* We can only support IPV4 TSO in tunnels if we can mangle the
	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
+	 * IPsec offload sets skb->encapsulation but still can handle
+	 * the TSO, so it's the exception.
	 */
-	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
-		features &= ~NETIF_F_TSO;
+	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) {
+#ifdef CONFIG_XFRM
+		if (!skb->sp)
+#endif
+			features &= ~NETIF_F_TSO;
+	}
 
	return features;
 }
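The #ifdef placement in the last hunk is easy to misread: CONFIG_XFRM guards only the brace-less inner if, so with XFRM compiled out the feature strip is unconditional, while with it an IPsec-offloaded skb (skb->sp set) keeps TSO. A minimal standalone model of that control flow, with skb->sp reduced to a boolean, which can be compiled with and without -DCONFIG_XFRM:

/* Plain-C model of the ixgbe_features_check() branch; not driver code. */
#include <stdbool.h>
#include <stdio.h>

#define F_TSO (1u << 0)

static unsigned int check_tso(unsigned int features, bool encapsulated,
			      bool can_mangle_id, bool has_xfrm_state)
{
	(void)has_xfrm_state;	/* unused when CONFIG_XFRM is not defined */

	if (encapsulated && !can_mangle_id) {
#ifdef CONFIG_XFRM
		if (!has_xfrm_state)	/* models the !skb->sp test */
#endif
			features &= ~F_TSO;
	}
	return features;
}

int main(void)
{
	printf("plain tunnel:  TSO %s\n",
	       check_tso(F_TSO, true, false, false) & F_TSO ?
	       "kept" : "stripped");
	printf("ipsec offload: TSO %s\n",
	       check_tso(F_TSO, true, false, true) & F_TSO ?
	       "kept" : "stripped");
	return 0;
}

Without -DCONFIG_XFRM both cases print "stripped"; with it, the IPsec case keeps TSO, which is exactly the exception the new comment describes.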