@@ -659,27 +659,30 @@ EXPORT_SYMBOL(ip_defrag);

 struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
 {
 	struct iphdr iph;
+	int netoff;
 	u32 len;

 	if (skb->protocol != htons(ETH_P_IP))
 		return skb;

-	if (skb_copy_bits(skb, 0, &iph, sizeof(iph)) < 0)
+	netoff = skb_network_offset(skb);
+
+	if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
 		return skb;

 	if (iph.ihl < 5 || iph.version != 4)
 		return skb;

 	len = ntohs(iph.tot_len);
-	if (skb->len < len || len < (iph.ihl * 4))
+	if (skb->len < netoff + len || len < (iph.ihl * 4))
 		return skb;

 	if (ip_is_fragment(&iph)) {
 		skb = skb_share_check(skb, GFP_ATOMIC);
 		if (skb) {
-			if (!pskb_may_pull(skb, iph.ihl*4))
+			if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
 				return skb;
-			if (pskb_trim_rcsum(skb, len))
+			if (pskb_trim_rcsum(skb, netoff + len))
 				return skb;
 			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
 			if (ip_defrag(skb, user))
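For context (not part of the patch itself): ip_check_defrag() can be handed skbs whose network header does not start at skb->data, e.g. packets still carrying their link-layer header, so every length comparison has to add skb_network_offset(). A minimal userspace sketch of that offset-aware validation follows; the byte-buffer layout and the ipv4_lengths_ok() helper name are illustrative, not kernel API.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Sketch of the corrected checks: when the IPv4 header starts `netoff`
 * bytes into the buffer (for example behind a 14-byte Ethernet header),
 * every comparison against the total buffer length must add `netoff`,
 * mirroring the patch's `skb->len < netoff + len` test.
 */
static bool ipv4_lengths_ok(const uint8_t *buf, size_t buf_len, size_t netoff)
{
	size_t ihl, tot_len;

	/* Room for the 20-byte fixed IPv4 header at the offset? */
	if (buf_len < netoff + 20)
		return false;

	/* Version must be 4, IHL at least 5 words (20 bytes). */
	if ((buf[netoff] >> 4) != 4 || (buf[netoff] & 0x0f) < 5)
		return false;

	ihl = (size_t)(buf[netoff] & 0x0f) * 4;
	/* tot_len is big-endian at byte offsets 2..3 of the IP header. */
	tot_len = ((size_t)buf[netoff + 2] << 8) | buf[netoff + 3];

	/* Mirrors: skb->len < netoff + len || len < (iph.ihl * 4) */
	return buf_len >= netoff + tot_len && tot_len >= ihl;
}

Dropping netoff from these comparisons reproduces the pre-patch behavior: with a link-layer header in front, the old pskb_trim_rcsum(skb, len) would trim the skb to the IP total length alone and cut off the tail of the datagram.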