@@ -748,6 +748,17 @@ static bool chk_code_allowed(u16 code_to_probe)
 	return codes[code_to_probe];
 }
 
+static bool bpf_check_basics_ok(const struct sock_filter *filter,
+				unsigned int flen)
+{
+	if (filter == NULL)
+		return false;
+	if (flen == 0 || flen > BPF_MAXINSNS)
+		return false;
+
+	return true;
+}
+
 /**
  *	bpf_check_classic - verify socket filter code
  *	@filter: filter to verify
@@ -768,9 +779,6 @@ static int bpf_check_classic(const struct sock_filter *filter,
 	bool anc_found;
 	int pc;
 
-	if (flen == 0 || flen > BPF_MAXINSNS)
-		return -EINVAL;
-
 	/* Check the filter code now */
 	for (pc = 0; pc < flen; pc++) {
 		const struct sock_filter *ftest = &filter[pc];
@@ -1065,7 +1073,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
 	struct bpf_prog *fp;
 
 	/* Make sure new filter is there and in the right amounts. */
-	if (fprog->filter == NULL)
+	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
 		return -EINVAL;
 
 	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
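For context (not part of the patch): a minimal sketch of a kernel-side caller hitting the consolidated check. The accept_all program and attach_example() helper are illustrative names only; the point is that a sock_fprog_kern with a NULL filter, len == 0 or len > BPF_MAXINSNS now fails bpf_check_basics_ok() and returns -EINVAL before bpf_prog_alloc() is ever reached.

#include <linux/filter.h>

/* Illustrative sketch only, not part of this patch. */
static struct sock_filter accept_all[] = {
	/* classic BPF "ret 0xffffffff": accept the whole packet */
	BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
};

static int attach_example(struct bpf_prog **pfp)
{
	struct sock_fprog_kern fprog = {
		.len	= ARRAY_SIZE(accept_all),	/* must be 1..BPF_MAXINSNS */
		.filter	= accept_all,			/* must be non-NULL */
	};

	/* Fails with -EINVAL in bpf_check_basics_ok() if either field is bad. */
	return bpf_prog_create(pfp, &fprog);
}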
@@ -1112,7 +1120,7 @@ int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
 	int err;
 
 	/* Make sure new filter is there and in the right amounts. */
-	if (fprog->filter == NULL)
+	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
 		return -EINVAL;
 
 	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
@@ -1207,7 +1215,6 @@ static
 struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
 {
 	unsigned int fsize = bpf_classic_proglen(fprog);
-	unsigned int bpf_fsize = bpf_prog_size(fprog->len);
 	struct bpf_prog *prog;
 	int err;
@@ -1215,10 +1222,10 @@ struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
 		return ERR_PTR(-EPERM);
 
 	/* Make sure new filter is there and in the right amounts. */
-	if (fprog->filter == NULL)
+	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
 		return ERR_PTR(-EINVAL);
 
-	prog = bpf_prog_alloc(bpf_fsize, 0);
+	prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
 	if (!prog)
 		return ERR_PTR(-ENOMEM);
@@ -1603,9 +1610,36 @@ static const struct bpf_func_proto bpf_csum_diff_proto = {
 	.arg5_type	= ARG_ANYTHING,
 };
 
+static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
+{
+	if (skb_at_tc_ingress(skb))
+		skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
+
+	return dev_forward_skb(dev, skb);
+}
+
+static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
+{
+	int ret;
+
+	if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
+		net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
+		kfree_skb(skb);
+		return -ENETDOWN;
+	}
+
+	skb->dev = dev;
+
+	__this_cpu_inc(xmit_recursion);
+	ret = dev_queue_xmit(skb);
+	__this_cpu_dec(xmit_recursion);
+
+	return ret;
+}
+
 static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
 {
-	struct sk_buff *skb = (struct sk_buff *) (long) r1, *skb2;
+	struct sk_buff *skb = (struct sk_buff *) (long) r1;
 	struct net_device *dev;
 
 	if (unlikely(flags & ~(BPF_F_INGRESS)))
@@ -1615,19 +1649,12 @@ static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
 	if (unlikely(!dev))
 		return -EINVAL;
 
-	skb2 = skb_clone(skb, GFP_ATOMIC);
-	if (unlikely(!skb2))
+	skb = skb_clone(skb, GFP_ATOMIC);
+	if (unlikely(!skb))
 		return -ENOMEM;
 
-	if (flags & BPF_F_INGRESS) {
-		if (skb_at_tc_ingress(skb2))
-			skb_postpush_rcsum(skb2, skb_mac_header(skb2),
-					   skb2->mac_len);
-		return dev_forward_skb(dev, skb2);
-	}
-
-	skb2->dev = dev;
-	return dev_queue_xmit(skb2);
+	return flags & BPF_F_INGRESS ?
+	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
 }
 
 static const struct bpf_func_proto bpf_clone_redirect_proto = {
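__bpf_tx_skb() above only references the per-CPU recursion counter; the declarations it relies on are assumed to live in the core networking code, roughly as sketched below (the limit value of 8 and the file placement are assumptions, not taken from this patch):

/* Assumed support code, sketch only, not part of this patch. */

/* shared header, e.g. netdevice.h */
#define XMIT_RECURSION_LIMIT	8
DECLARE_PER_CPU(int, xmit_recursion);

/* single definition site, e.g. net/core/dev.c */
DEFINE_PER_CPU(int, xmit_recursion);
EXPORT_PER_CPU_SYMBOL(xmit_recursion);

A plain per-CPU counter is enough here presumably because the tc/BPF transmit path runs without sleeping on one CPU, so the __this_cpu_inc()/__this_cpu_dec() pair bracketing dev_queue_xmit() cannot race with itself; a program that keeps redirecting back into its own device is cut off with -ENETDOWN once the limit is exceeded.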
@@ -1671,15 +1698,8 @@ int skb_do_redirect(struct sk_buff *skb)
 		return -EINVAL;
 	}
 
-	if (ri->flags & BPF_F_INGRESS) {
-		if (skb_at_tc_ingress(skb))
-			skb_postpush_rcsum(skb, skb_mac_header(skb),
-					   skb->mac_len);
-		return dev_forward_skb(dev, skb);
-	}
-
-	skb->dev = dev;
-	return dev_queue_xmit(skb);
+	return ri->flags & BPF_F_INGRESS ?
+	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
 }
 
 static const struct bpf_func_proto bpf_redirect_proto = {
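To show where skb_do_redirect() fits, here is a hedged sketch of a tc/BPF program (a separate object built with clang -target bpf) that requests an ingress redirect; ifindex 2, the section names and the helper declaration style are placeholders, not part of the patch:

#include <linux/bpf.h>

#define __section(NAME) __attribute__((section(NAME), used))

/* helper stub in the pre-libbpf sample style; id comes from enum bpf_func_id */
static int (*bpf_redirect)(__u32 ifindex, __u64 flags) =
	(void *) BPF_FUNC_redirect;

__section("classifier")
int redirect_to_ingress(struct __sk_buff *skb)
{
	/* Returns TC_ACT_REDIRECT; the kernel later calls skb_do_redirect(),
	 * which takes the __bpf_rx_skb() path because of BPF_F_INGRESS. */
	return bpf_redirect(2, BPF_F_INGRESS);
}

char __license[] __section("license") = "GPL";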