@@ -9,6 +9,7 @@
 #include <net/sock_reuseport.h>
 #include <linux/bpf.h>
 #include <linux/idr.h>
+#include <linux/filter.h>
 #include <linux/rcupdate.h>
 
 #define INIT_SOCKS 128
@@ -133,8 +134,7 @@ static void reuseport_free_rcu(struct rcu_head *head)
 	struct sock_reuseport *reuse;
 
 	reuse = container_of(head, struct sock_reuseport, rcu);
-	if (reuse->prog)
-		bpf_prog_destroy(reuse->prog);
+	sk_reuseport_prog_free(rcu_dereference_protected(reuse->prog, 1));
 	if (reuse->reuseport_id)
 		ida_simple_remove(&reuseport_ida, reuse->reuseport_id);
 	kfree(reuse);
@@ -219,9 +219,9 @@ void reuseport_detach_sock(struct sock *sk)
 }
 EXPORT_SYMBOL(reuseport_detach_sock);
 
-static struct sock *run_bpf(struct sock_reuseport *reuse, u16 socks,
-			    struct bpf_prog *prog, struct sk_buff *skb,
-			    int hdr_len)
+static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks,
+				   struct bpf_prog *prog, struct sk_buff *skb,
+				   int hdr_len)
 {
 	struct sk_buff *nskb = NULL;
 	u32 index;
@@ -282,9 +282,15 @@ struct sock *reuseport_select_sock(struct sock *sk,
 		/* paired with smp_wmb() in reuseport_add_sock() */
 		smp_rmb();
 
-		if (prog && skb)
-			sk2 = run_bpf(reuse, socks, prog, skb, hdr_len);
+		if (!prog || !skb)
+			goto select_by_hash;
+
+		if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
+			sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, hash);
+		else
+			sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len);
 
+select_by_hash:
 		/* no bpf or invalid bpf result: fall back to hash usage */
 		if (!sk2)
 			sk2 = reuse->socks[reciprocal_scale(hash, socks)];
@@ -296,12 +302,21 @@ out:
 }
 EXPORT_SYMBOL(reuseport_select_sock);
 
-struct bpf_prog *
-reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
+int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
 {
 	struct sock_reuseport *reuse;
 	struct bpf_prog *old_prog;
 
+	if (sk_unhashed(sk) && sk->sk_reuseport) {
+		int err = reuseport_alloc(sk, false);
+
+		if (err)
+			return err;
+	} else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
+		/* The socket wasn't bound with SO_REUSEPORT */
+		return -EINVAL;
+	}
+
 	spin_lock_bh(&reuseport_lock);
 	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
 					  lockdep_is_held(&reuseport_lock));
@@ -310,6 +325,7 @@ reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
 	rcu_assign_pointer(reuse->prog, prog);
 	spin_unlock_bh(&reuseport_lock);
 
-	return old_prog;
+	sk_reuseport_prog_free(old_prog);
+	return 0;
 }
 EXPORT_SYMBOL(reuseport_attach_prog);
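
For illustration only (this sketch is not part of the patch): with the dispatch above, a BPF_PROG_TYPE_SK_REUSEPORT program attached through reuseport_attach_prog() could look roughly like the following. It assumes the BPF_MAP_TYPE_REUSEPORT_SOCKARRAY map type and the bpf_sk_select_reuseport() helper introduced alongside this change, plus libbpf's bpf_helpers.h; the map name and function name are placeholders, not anything defined in this file.

/* Illustrative sk_reuseport program (sketch, not kernel code): built with
 * clang -target bpf and attached via setsockopt(SO_ATTACH_REUSEPORT_EBPF).
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
	__uint(max_entries, 8);
	__type(key, __u32);
	__type(value, __u64);
} reuseport_map SEC(".maps");		/* placeholder map name */

SEC("sk_reuseport")
int select_sock(struct sk_reuseport_md *md)
{
	__u32 key = 0;	/* always try the socket stored at index 0 */

	/* If no socket gets selected here, reuseport_select_sock() falls
	 * back to the hash-based path shown in the hunk above.
	 */
	bpf_sk_select_reuseport(md, &reuseport_map, &key, 0);
	return SK_PASS;
}

char _license[] SEC("license") = "GPL";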