@@ -496,6 +496,81 @@ static int gre_handle_offloads(struct sk_buff *skb, bool csum)
 					csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
 }
 
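+/* Populate fl6, dsfield and encap_limit from tunnel parms and the inner IPv4 header. */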
+static void prepare_ip6gre_xmit_ipv4(struct sk_buff *skb,
+				     struct net_device *dev,
+				     struct flowi6 *fl6, __u8 *dsfield,
+				     int *encap_limit)
+{
+	const struct iphdr *iph = ip_hdr(skb);
+	struct ip6_tnl *t = netdev_priv(dev);
+
+	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+		*encap_limit = t->parms.encap_limit;
+
+	memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));
+
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
+		*dsfield = ipv4_get_dsfield(iph);
+	else
+		*dsfield = ip6_tclass(t->parms.flowinfo);
+
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+		fl6->flowi6_mark = skb->mark;
+	else
+		fl6->flowi6_mark = t->parms.fwmark;
+
+	fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+}
+
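+/* Same for an inner IPv6 header; returns -1 if a zero encapsulation limit option is found. */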
+static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
+				    struct net_device *dev,
+				    struct flowi6 *fl6, __u8 *dsfield,
+				    int *encap_limit)
+{
+	struct ipv6hdr *ipv6h;
+	struct ip6_tnl *t = netdev_priv(dev);
+	__u16 offset;
+
+	offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
+	/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
+	ipv6h = ipv6_hdr(skb);
+
+	if (offset > 0) {
+		struct ipv6_tlv_tnl_enc_lim *tel;
+
+		tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
+		if (tel->encap_limit == 0) {
+			icmpv6_send(skb, ICMPV6_PARAMPROB,
+				    ICMPV6_HDR_FIELD, offset + 2);
+			return -1;
+		}
+		*encap_limit = tel->encap_limit - 1;
+	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
+		*encap_limit = t->parms.encap_limit;
+	}
+
+	memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));
+
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
+		*dsfield = ipv6_get_dsfield(ipv6h);
+	else
+		*dsfield = ip6_tclass(t->parms.flowinfo);
+
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
+		fl6->flowlabel |= ip6_flowlabel(ipv6h);
+
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+		fl6->flowi6_mark = skb->mark;
+	else
+		fl6->flowi6_mark = t->parms.fwmark;
+
+	fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+
+	return 0;
+}
+
 static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
 			       struct net_device *dev, __u8 dsfield,
 			       struct flowi6 *fl6, int encap_limit,
@@ -527,7 +602,6 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
 static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
-	const struct iphdr *iph = ip_hdr(skb);
 	int encap_limit = -1;
 	struct flowi6 fl6;
 	__u8 dsfield;
@@ -536,21 +610,7 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
 
 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 
-	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
-		encap_limit = t->parms.encap_limit;
-
-	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
-
-	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
-		dsfield = ipv4_get_dsfield(iph);
-	else
-		dsfield = ip6_tclass(t->parms.flowinfo);
-	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
-		fl6.flowi6_mark = skb->mark;
-	else
-		fl6.flowi6_mark = t->parms.fwmark;
-
-	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+	prepare_ip6gre_xmit_ipv4(skb, dev, &fl6, &dsfield, &encap_limit);
 
 	err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
 	if (err)
@@ -574,7 +634,6 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
 	struct ip6_tnl *t = netdev_priv(dev);
 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 	int encap_limit = -1;
-	__u16 offset;
 	struct flowi6 fl6;
 	__u8 dsfield;
 	__u32 mtu;
@@ -583,37 +642,8 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
 	if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
 		return -1;
 
-	offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
-	/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
-	ipv6h = ipv6_hdr(skb);
-
-	if (offset > 0) {
-		struct ipv6_tlv_tnl_enc_lim *tel;
-		tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
-		if (tel->encap_limit == 0) {
-			icmpv6_send(skb, ICMPV6_PARAMPROB,
-				    ICMPV6_HDR_FIELD, offset + 2);
-			return -1;
-		}
-		encap_limit = tel->encap_limit - 1;
-	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
-		encap_limit = t->parms.encap_limit;
-
-	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
-
-	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
-		dsfield = ipv6_get_dsfield(ipv6h);
-	else
-		dsfield = ip6_tclass(t->parms.flowinfo);
-
-	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
-		fl6.flowlabel |= ip6_flowlabel(ipv6h);
-	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
-		fl6.flowi6_mark = skb->mark;
-	else
-		fl6.flowi6_mark = t->parms.fwmark;
-
-	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+	if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6, &dsfield, &encap_limit))
+		return -1;
 
 	if (gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)))
 		return -1;