@@ -1355,13 +1355,9 @@ static inline int bpf_try_make_writable(struct sk_buff *skb,
 {
 	int err;
 
-	if (!skb_cloned(skb))
-		return 0;
-	if (skb_clone_writable(skb, write_len))
-		return 0;
-	err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-	if (!err)
-		bpf_compute_data_end(skb);
+	err = skb_ensure_writable(skb, write_len);
+	bpf_compute_data_end(skb);
+
 	return err;
 }
 
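
The open-coded unclone sequence is folded into the generic helper here.
The notable behavioral addition is that skb_ensure_writable() also pulls
write_len bytes into the linear area, which is what lets the hunks below
write through skb->data directly; bpf_compute_data_end() now runs
unconditionally, keeping the cached data_end correct after a head
reallocation. For reference, skb_ensure_writable() in net/core/skbuff.c
is essentially:

	int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len)
	{
		/* Make sure the first write_len bytes sit in the linear area. */
		if (!pskb_may_pull(skb, write_len))
			return -ENOMEM;

		/* Head is private, or a clone we may write to: done. */
		if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
			return 0;

		/* Shared head: reallocate a private copy. */
		return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
	}
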
@@ -1379,42 +1375,25 @@ static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
 
 static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
 {
-	struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
 	struct sk_buff *skb = (struct sk_buff *) (long) r1;
-	int offset = (int) r2;
+	unsigned int offset = (unsigned int) r2;
 	void *from = (void *) (long) r3;
 	unsigned int len = (unsigned int) r4;
 	void *ptr;
 
 	if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
 		return -EINVAL;
-
-	/* bpf verifier guarantees that:
-	 * 'from' pointer points to bpf program stack
-	 * 'len' bytes of it were initialized
-	 * 'len' > 0
-	 * 'skb' is a valid pointer to 'struct sk_buff'
-	 *
-	 * so check for invalid 'offset' and too large 'len'
-	 */
-	if (unlikely((u32) offset > 0xffff || len > sizeof(sp->buff)))
+	if (unlikely(offset > 0xffff))
 		return -EFAULT;
 	if (unlikely(bpf_try_make_writable(skb, offset + len)))
 		return -EFAULT;
 
-	ptr = skb_header_pointer(skb, offset, len, sp->buff);
-	if (unlikely(!ptr))
-		return -EFAULT;
-
+	ptr = skb->data + offset;
 	if (flags & BPF_F_RECOMPUTE_CSUM)
 		__skb_postpull_rcsum(skb, ptr, len, offset);
 
 	memcpy(ptr, from, len);
 
-	if (ptr == sp->buff)
-		/* skb_store_bits cannot return -EFAULT here */
-		skb_store_bits(skb, offset, ptr, len);
-
 	if (flags & BPF_F_RECOMPUTE_CSUM)
 		__skb_postpush_rcsum(skb, ptr, len, offset);
 	if (flags & BPF_F_INVALIDATE_HASH)
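
With the pull guarantee from skb_ensure_writable(), skb->data + offset is
known to cover len valid private bytes, so the per-CPU scratchpad and the
skb_store_bits() write-back round trip are no longer needed. Nothing
changes for programs; a minimal tc/BPF usage sketch (illustrative only,
not part of this patch):

	/* Rewrite the IPv4 TTL in place; the helper copes with cloned
	 * and non-linear skbs. A real program would pair this with
	 * bpf_l3_csum_replace() to fix up the IP header checksum.
	 */
	__u8 ttl = 64;

	bpf_skb_store_bytes(skb, ETH_HLEN + offsetof(struct iphdr, ttl),
			    &ttl, sizeof(ttl), 0);
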
@@ -1437,12 +1416,12 @@ static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
 static u64 bpf_skb_load_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 {
 	const struct sk_buff *skb = (const struct sk_buff *)(unsigned long) r1;
-	int offset = (int) r2;
+	unsigned int offset = (unsigned int) r2;
 	void *to = (void *)(unsigned long) r3;
 	unsigned int len = (unsigned int) r4;
 	void *ptr;
 
-	if (unlikely((u32) offset > 0xffff))
+	if (unlikely(offset > 0xffff))
 		goto err_clear;
 
 	ptr = skb_header_pointer(skb, offset, len, to);
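
The load helper deliberately keeps skb_header_pointer(): a read never has
to unclone or pull anything, since bytes living in paged data are simply
copied into the destination buffer. Only the offset type and bounds check
are brought in line with the store helper. Illustrative use from a tc/BPF
program (not part of this patch):

	/* Take a private copy of the IPv4 header, wherever it lives. */
	struct iphdr iph;

	if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph, sizeof(iph)) < 0)
		return TC_ACT_OK;
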
@@ -1470,20 +1449,17 @@ static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
 static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
 {
 	struct sk_buff *skb = (struct sk_buff *) (long) r1;
-	int offset = (int) r2;
-	__sum16 sum, *ptr;
+	unsigned int offset = (unsigned int) r2;
+	__sum16 *ptr;
 
 	if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
 		return -EINVAL;
-	if (unlikely((u32) offset > 0xffff))
-		return -EFAULT;
-	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum))))
+	if (unlikely(offset > 0xffff || offset & 1))
 		return -EFAULT;
-
-	ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
-	if (unlikely(!ptr))
+	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
 		return -EFAULT;
 
+	ptr = (__sum16 *)(skb->data + offset);
 	switch (flags & BPF_F_HDR_FIELD_MASK) {
 	case 0:
 		if (unlikely(from != 0))
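
The new "offset & 1" test exists because ptr is now dereferenced directly
as a __sum16 *: a checksum field is always 2-byte aligned in a sane
header, so an odd offset can only come from a broken program, and
rejecting it up front avoids an unaligned 16-bit store on
strict-alignment architectures (the old skb_header_pointer() stack-copy
path happened to mask such offsets). Typical usage is unchanged; a sketch
assuming __be32 old_ip/new_ip from an earlier address rewrite:

	/* Fold the 4-byte address change into the IP header checksum;
	 * the field width is carried in the low bits of the flags.
	 */
	bpf_l3_csum_replace(skb, ETH_HLEN + offsetof(struct iphdr, check),
			    old_ip, new_ip, sizeof(new_ip));
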
@@ -1501,10 +1477,6 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
 		return -EINVAL;
 	}
 
-	if (ptr == &sum)
-		/* skb_store_bits guaranteed to not return -EFAULT here */
-		skb_store_bits(skb, offset, ptr, sizeof(sum));
-
 	return 0;
 }
 
@@ -1524,20 +1496,18 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
 	struct sk_buff *skb = (struct sk_buff *) (long) r1;
 	bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
 	bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
-	int offset = (int) r2;
-	__sum16 sum, *ptr;
+	unsigned int offset = (unsigned int) r2;
+	__sum16 *ptr;
 
 	if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_PSEUDO_HDR |
 			       BPF_F_HDR_FIELD_MASK)))
 		return -EINVAL;
-	if (unlikely((u32) offset > 0xffff))
+	if (unlikely(offset > 0xffff || offset & 1))
 		return -EFAULT;
-	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum))))
+	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
 		return -EFAULT;
 
-	ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
-	if (unlikely(!ptr))
-		return -EFAULT;
+	ptr = (__sum16 *)(skb->data + offset);
 	if (is_mmzero && !*ptr)
 		return 0;
 
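
The L4 variant follows the same pattern; the extra wrinkle is the
is_mmzero handling for UDP's "zero means no checksum" encoding, which
works as before on the direct pointer. A companion sketch to the L3
example above (illustrative; assumes __be32 old_ip/new_ip and an IPv4+TCP
packet without IP options):

	/* The TCP checksum covers the pseudo header, so an address
	 * rewrite must be folded in with BPF_F_PSEUDO_HDR set.
	 */
	bpf_l4_csum_replace(skb, ETH_HLEN + sizeof(struct iphdr) +
				 offsetof(struct tcphdr, check),
			    old_ip, new_ip,
			    BPF_F_PSEUDO_HDR | sizeof(new_ip));
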
@@ -1560,10 +1530,6 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
 
 	if (is_mmzero && !*ptr)
 		*ptr = CSUM_MANGLED_0;
-	if (ptr == &sum)
-		/* skb_store_bits guaranteed to not return -EFAULT here */
-		skb_store_bits(skb, offset, ptr, sizeof(sum));
-
 	return 0;
 }
 