@@ -598,6 +598,18 @@ static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
 	opt->ops.opt_nflen = 8;
 }
 
+static __sum16 gre6_checksum(struct sk_buff *skb)
+{
+	__wsum csum;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		csum = lco_csum(skb);
+	else
+		csum = skb_checksum(skb, sizeof(struct ipv6hdr),
+				    skb->len - sizeof(struct ipv6hdr), 0);
+	return csum_fold(csum);
+}
+
 static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 				struct net_device *dev,
 				__u8 dsfield,
@@ -609,7 +621,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 	struct net *net = tunnel->net;
 	struct net_device *tdev;	/* Device to other host */
 	struct ipv6hdr *ipv6h;		/* Our new IP header */
-	unsigned int max_headroom = 0;	/* The extra header space needed */
+	unsigned int min_headroom = 0;	/* The extra header space needed */
 	int gre_hlen;
 	struct ipv6_tel_txoption opt;
 	int mtu;
@@ -617,7 +629,6 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 	struct net_device_stats *stats = &tunnel->dev->stats;
 	int err = -1;
 	u8 proto;
-	struct sk_buff *new_skb;
 	__be16 protocol;
 
 	if (dev->type == ARPHRD_ETHER)
@@ -660,14 +671,14 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 
 	mtu = dst_mtu(dst) - sizeof(*ipv6h);
 	if (encap_limit >= 0) {
-		max_headroom += 8;
+		min_headroom += 8;
 		mtu -= 8;
 	}
 	if (mtu < IPV6_MIN_MTU)
 		mtu = IPV6_MIN_MTU;
 	if (skb_dst(skb))
 		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
-	if (skb->len > mtu) {
+	if (skb->len > mtu && !skb_is_gso(skb)) {
 		*pmtu = mtu;
 		err = -EMSGSIZE;
 		goto tx_err_dst_release;
@@ -685,20 +696,19 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 
 	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
 
-	max_headroom += LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len;
+	min_headroom += LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len;
 
-	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
-	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
-		new_skb = skb_realloc_headroom(skb, max_headroom);
-		if (max_headroom > dev->needed_headroom)
-			dev->needed_headroom = max_headroom;
-		if (!new_skb)
-			goto tx_err_dst_release;
+	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
+		int head_delta = SKB_DATA_ALIGN(min_headroom -
+						skb_headroom(skb) +
+						16);
 
-		if (skb->sk)
-			skb_set_owner_w(new_skb, skb->sk);
-		consume_skb(skb);
-		skb = new_skb;
+		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
+				       0, GFP_ATOMIC);
+		if (min_headroom > dev->needed_headroom)
+			dev->needed_headroom = min_headroom;
+		if (unlikely(err))
+			goto tx_err_dst_release;
 	}
 
 	if (!fl6->flowi6_mark && ndst)
@@ -711,10 +721,11 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 		ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
 	}
 
-	if (likely(!skb->encapsulation)) {
-		skb_reset_inner_headers(skb);
-		skb->encapsulation = 1;
-	}
+	err = iptunnel_handle_offloads(skb,
+				       (tunnel->parms.o_flags & GRE_CSUM) ?
+				       SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
+	if (err)
+		goto tx_err_dst_release;
 
 	skb_push(skb, gre_hlen);
 	skb_reset_network_header(skb);
@@ -748,10 +759,11 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 			*ptr = tunnel->parms.o_key;
 			ptr--;
 		}
-		if (tunnel->parms.o_flags&GRE_CSUM) {
+		if ((tunnel->parms.o_flags & GRE_CSUM) &&
+		    !(skb_shinfo(skb)->gso_type &
+		      (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
 			*ptr = 0;
-			*(__sum16 *)ptr = ip_compute_csum((void *)(ipv6h+1),
-				skb->len - sizeof(struct ipv6hdr));
+			*(__sum16 *)ptr = gre6_checksum(skb);
 		}
 	}
 
@@ -987,6 +999,8 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
 			dev->mtu = rt->dst.dev->mtu - addend;
 			if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
 				dev->mtu -= 8;
+			if (dev->type == ARPHRD_ETHER)
+				dev->mtu -= ETH_HLEN;
 
 			if (dev->mtu < IPV6_MIN_MTU)
 				dev->mtu = IPV6_MIN_MTU;
@@ -1505,6 +1519,11 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = {
 	.ndo_get_iflink = ip6_tnl_get_iflink,
 };
 
+#define GRE6_FEATURES (NETIF_F_SG |		\
+		       NETIF_F_FRAGLIST |	\
+		       NETIF_F_HIGHDMA |	\
+		       NETIF_F_HW_CSUM)
+
 static void ip6gre_tap_setup(struct net_device *dev)
 {
 
@@ -1538,9 +1557,21 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
 	nt->net = dev_net(dev);
 	ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
 
-	/* Can use a lockless transmit, unless we generate output sequences */
-	if (!(nt->parms.o_flags & GRE_SEQ))
+	dev->features |= GRE6_FEATURES;
+	dev->hw_features |= GRE6_FEATURES;
+
+	if (!(nt->parms.o_flags & GRE_SEQ)) {
+		/* TCP segmentation offload is not supported when we
+		 * generate output sequences.
+		 */
+		dev->features |= NETIF_F_GSO_SOFTWARE;
+		dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+
+		/* Can use a lockless transmit, unless we generate
+		 * output sequences
+		 */
 		dev->features |= NETIF_F_LLTX;
+	}
 
 	err = register_netdevice(dev);
 	if (err)