Browse Source

bpf: try harder on clones when writing into skb

When we're dealing with clones and the area is not writeable, try
harder and get a copy via pskb_expand_head(). Replace also other
occurrences in tc actions with the new skb_try_make_writable().

Reported-by: Ashhad Sheikh <ashhadsheikh394@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Daniel Borkmann 9 years ago
parent
commit
3697649ff2
4 changed files with 24 additions and 28 deletions
  1. 7 0
      include/linux/skbuff.h
  2. 10 9
      net/core/filter.c
  3. 2 6
      net/sched/act_csum.c
  4. 5 13
      net/sched/act_nat.c

+ 7 - 0
include/linux/skbuff.h

@@ -2630,6 +2630,13 @@ static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len
 	       skb_headroom(skb) + len <= skb->hdr_len;
 }
 
+static inline int skb_try_make_writable(struct sk_buff *skb,
+					unsigned int write_len)
+{
+	return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
+	       pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+}
+
 static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
 			    int cloned)
 {

+ 10 - 9
net/core/filter.c

@@ -1364,9 +1364,7 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
 	 */
 	if (unlikely((u32) offset > 0xffff || len > sizeof(sp->buff)))
 		return -EFAULT;
-
-	if (unlikely(skb_cloned(skb) &&
-		     !skb_clone_writable(skb, offset + len)))
+	if (unlikely(skb_try_make_writable(skb, offset + len)))
 		return -EFAULT;
 
 	ptr = skb_header_pointer(skb, offset, len, sp->buff);
@@ -1439,9 +1437,7 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
 		return -EINVAL;
 	if (unlikely((u32) offset > 0xffff))
 		return -EFAULT;
-
-	if (unlikely(skb_cloned(skb) &&
-		     !skb_clone_writable(skb, offset + sizeof(sum))))
+	if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum))))
 		return -EFAULT;
 
 	ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
@@ -1488,9 +1484,7 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
 		return -EINVAL;
 	if (unlikely((u32) offset > 0xffff))
 		return -EFAULT;
-
-	if (unlikely(skb_cloned(skb) &&
-		     !skb_clone_writable(skb, offset + sizeof(sum))))
+	if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum))))
 		return -EFAULT;
 
 	ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
@@ -1734,6 +1728,13 @@ bool bpf_helper_changes_skb_data(void *func)
 		return true;
 	if (func == bpf_skb_vlan_pop)
 		return true;
+	if (func == bpf_skb_store_bytes)
+		return true;
+	if (func == bpf_l3_csum_replace)
+		return true;
+	if (func == bpf_l4_csum_replace)
+		return true;
+
 	return false;
 }
 

+ 2 - 6
net/sched/act_csum.c

@@ -105,9 +105,7 @@ static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
 	int hl = ihl + jhl;
 
 	if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
-	    (skb_cloned(skb) &&
-	     !skb_clone_writable(skb, hl + ntkoff) &&
-	     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+	    skb_try_make_writable(skb, hl + ntkoff))
 		return NULL;
 	else
 		return (void *)(skb_network_header(skb) + ihl);
@@ -365,9 +363,7 @@ static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
 	}
 
 	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
-		if (skb_cloned(skb) &&
-		    !skb_clone_writable(skb, sizeof(*iph) + ntkoff) &&
-		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+		if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
			goto fail;
 
 		ip_send_check(ip_hdr(skb));

+ 5 - 13
net/sched/act_nat.c

@@ -126,9 +126,7 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
 		addr = iph->daddr;
 
 	if (!((old_addr ^ addr) & mask)) {
-		if (skb_cloned(skb) &&
-		    !skb_clone_writable(skb, sizeof(*iph) + noff) &&
-		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+		if (skb_try_make_writable(skb, sizeof(*iph) + noff))
 			goto drop;
 
 		new_addr &= mask;
@@ -156,9 +154,7 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
 		struct tcphdr *tcph;
 
 		if (!pskb_may_pull(skb, ihl + sizeof(*tcph) + noff) ||
-		    (skb_cloned(skb) &&
-		     !skb_clone_writable(skb, ihl + sizeof(*tcph) + noff) &&
-		     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+		    skb_try_make_writable(skb, ihl + sizeof(*tcph) + noff))
 			goto drop;
 
 		tcph = (void *)(skb_network_header(skb) + ihl);
@@ -171,9 +167,7 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
 		struct udphdr *udph;
 
 		if (!pskb_may_pull(skb, ihl + sizeof(*udph) + noff) ||
-		    (skb_cloned(skb) &&
-		     !skb_clone_writable(skb, ihl + sizeof(*udph) + noff) &&
-		     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+		    skb_try_make_writable(skb, ihl + sizeof(*udph) + noff))
 			goto drop;
 
 		udph = (void *)(skb_network_header(skb) + ihl);
@@ -213,10 +207,8 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
 		if ((old_addr ^ addr) & mask)
 			break;
 
-		if (skb_cloned(skb) &&
-		    !skb_clone_writable(skb, ihl + sizeof(*icmph) +
-					     sizeof(*iph) + noff) &&
-		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+		if (skb_try_make_writable(skb, ihl + sizeof(*icmph) +
+					  sizeof(*iph) + noff))
 			goto drop;
 
 		icmph = (void *)(skb_network_header(skb) + ihl);