@@ -400,18 +400,19 @@ ip6_tnl_dev_uninit(struct net_device *dev)
 
 __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
 {
-	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
-	__u8 nexthdr = ipv6h->nexthdr;
-	__u16 off = sizeof(*ipv6h);
+	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
+	unsigned int nhoff = raw - skb->data;
+	unsigned int off = nhoff + sizeof(*ipv6h);
+	u8 next, nexthdr = ipv6h->nexthdr;
 
 	while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
-		__u16 optlen = 0;
 		struct ipv6_opt_hdr *hdr;
-		if (raw + off + sizeof(*hdr) > skb->data &&
-		    !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr)))
+		u16 optlen;
+
+		if (!pskb_may_pull(skb, off + sizeof(*hdr)))
 			break;
 
-		hdr = (struct ipv6_opt_hdr *) (raw + off);
+		hdr = (struct ipv6_opt_hdr *)(skb->data + off);
 		if (nexthdr == NEXTHDR_FRAGMENT) {
 			struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
 			if (frag_hdr->frag_off)
@@ -422,20 +423,29 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
 		} else {
 			optlen = ipv6_optlen(hdr);
 		}
+		/* cache hdr->nexthdr, since pskb_may_pull() might
+		 * invalidate hdr
+		 */
+		next = hdr->nexthdr;
 		if (nexthdr == NEXTHDR_DEST) {
-			__u16 i = off + 2;
+			u16 i = 2;
+
+			/* Remember : hdr is no longer valid at this point. */
+			if (!pskb_may_pull(skb, off + optlen))
+				break;
+
 			while (1) {
 				struct ipv6_tlv_tnl_enc_lim *tel;
 
 				/* No more room for encapsulation limit */
-				if (i + sizeof (*tel) > off + optlen)
+				if (i + sizeof(*tel) > optlen)
 					break;
 
-				tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i];
+				tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
 				/* return index of option if found and valid */
 				if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
 				    tel->length == 1)
-					return i;
+					return i + off - nhoff;
 				/* else jump to next option */
 				if (tel->type)
 					i += tel->length + 2;
@@ -443,7 +453,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
 					i++;
 			}
 		}
-		nexthdr = hdr->nexthdr;
+		nexthdr = next;
 		off += optlen;
 	}
 	return 0;
@@ -1303,6 +1313,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 			fl6.flowlabel = key->label;
 	} else {
 		offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
+		/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
+		ipv6h = ipv6_hdr(skb);
 		if (offset > 0) {
 			struct ipv6_tlv_tnl_enc_lim *tel;
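
Note on the fix: pskb_may_pull() may reallocate skb->head, so every pointer previously derived from skb->data (raw, hdr, and ipv6h in the caller) can become stale after the call. The patch therefore tracks offsets (nhoff, off), recomputes hdr from skb->data after each pull, caches hdr->nexthdr in next before the pull that may invalidate it, and reloads ipv6h via ipv6_hdr(skb) once the parser returns. The userspace sketch below illustrates only that pattern; struct buf, ensure_pulled() and walk_options() are hypothetical stand-ins for struct sk_buff, pskb_may_pull() and the parser, not kernel APIs.

/*
 * Userspace sketch of the "work in offsets, recompute pointers after each
 * pull" pattern used above.  struct buf and ensure_pulled() are hypothetical
 * stand-ins for struct sk_buff and pskb_may_pull(); this is not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
	unsigned char *data;		/* pulled (visible) bytes, like skb->data */
	size_t len;			/* bytes currently pulled */
	const unsigned char *backing;	/* full packet contents */
	size_t total;			/* full packet length */
};

/*
 * Make at least 'need' bytes visible.  May realloc() the visible area, so any
 * pointer previously derived from b->data becomes invalid afterwards -- the
 * same property of pskb_may_pull() that the patch has to cope with.
 */
static int ensure_pulled(struct buf *b, size_t need)
{
	unsigned char *p;

	if (need > b->total)
		return 0;
	if (need <= b->len)
		return 1;
	p = realloc(b->data, need);
	if (!p)
		return 0;
	memcpy(p + b->len, b->backing + b->len, need - b->len);
	b->data = p;
	b->len = need;
	return 1;
}

struct opt_hdr {
	unsigned char nexthdr;
	unsigned char hdrlen;		/* option length in 8-byte units, minus 1 */
};

/*
 * Walk a chain of option headers by offset: 'hdr' is recomputed from b->data
 * after every ensure_pulled() call, and hdr->nexthdr is cached in 'next'
 * before the pull that could invalidate 'hdr' -- mirroring the patch.
 */
static size_t walk_options(struct buf *b, size_t off)
{
	unsigned char next = 0;

	while (ensure_pulled(b, off + sizeof(struct opt_hdr))) {
		const struct opt_hdr *hdr =
			(const struct opt_hdr *)(b->data + off);   /* fresh pointer */
		size_t optlen = ((size_t)hdr->hdrlen + 1) * 8;

		next = hdr->nexthdr;		/* cache before the next pull */
		if (!ensure_pulled(b, off + optlen))
			break;			/* 'hdr' must not be used past this point */
		off += optlen;
		if (next == 59)			/* 59 == NEXTHDR_NONE */
			break;
	}
	return off;
}

int main(void)
{
	/* one 8-byte option whose nexthdr says "no next header" (59) */
	unsigned char payload[16] = { 59, 0 };
	struct buf b = { .backing = payload, .total = sizeof(payload) };

	b.data = malloc(2);
	if (!b.data)
		return 1;
	memcpy(b.data, payload, 2);
	b.len = 2;

	printf("parsed up to offset %zu\n", walk_options(&b, 0));
	free(b.data);
	return 0;
}

Built with a C99 compiler this prints "parsed up to offset 8" for the sample payload; the point is only that no pointer into b->data is ever held across a call that may reallocate it, which is the same discipline the patch imposes on raw, hdr and ipv6h.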