@@ -36,31 +36,31 @@ void cgroup_bpf_update(struct cgroup *cgrp,
 			struct bpf_prog *prog,
 			enum bpf_attach_type type);
 
-int __cgroup_bpf_run_filter(struct sock *sk,
-			    struct sk_buff *skb,
-			    enum bpf_attach_type type);
-
-/* Wrappers for __cgroup_bpf_run_filter() guarded by cgroup_bpf_enabled. */
-#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb)			\
-({									\
-	int __ret = 0;							\
-	if (cgroup_bpf_enabled)						\
-		__ret = __cgroup_bpf_run_filter(sk, skb,		\
-						BPF_CGROUP_INET_INGRESS); \
-									\
-	__ret;								\
+int __cgroup_bpf_run_filter_skb(struct sock *sk,
+				struct sk_buff *skb,
+				enum bpf_attach_type type);
+
+/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
+#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
+({									      \
+	int __ret = 0;							      \
+	if (cgroup_bpf_enabled)						      \
+		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
+						    BPF_CGROUP_INET_INGRESS); \
+									      \
+	__ret;								      \
 })
 
-#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb)				\
-({									\
-	int __ret = 0;							\
-	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		\
-		typeof(sk) __sk = sk_to_full_sk(sk);			\
-		if (sk_fullsock(__sk))					\
-			__ret = __cgroup_bpf_run_filter(__sk, skb,	\
-						BPF_CGROUP_INET_EGRESS); \
-	}								\
-	__ret;								\
+#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
+({									       \
+	int __ret = 0;							       \
+	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		       \
+		typeof(sk) __sk = sk_to_full_sk(sk);			       \
+		if (sk_fullsock(__sk))					       \
+			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
+						      BPF_CGROUP_INET_EGRESS); \
+	}								       \
+	__ret;								       \
 })
 
 #else
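
For context, a minimal sketch of how the renamed wrappers are invoked from the data path. The two example_* functions below are hypothetical stand-ins, not part of this patch; the actual call sites live on the socket receive path and in the IPv4/IPv6 output paths and are updated separately.

/*
 * Hypothetical callers, for illustration only -- not part of this patch.
 */
static inline int example_ingress(struct sock *sk, struct sk_buff *skb)
{
	/* Runs any BPF_CGROUP_INET_INGRESS program attached to sk's
	 * cgroup.  The wrapper evaluates to 0 unless a cgroup BPF
	 * program has been attached, since it is guarded by
	 * cgroup_bpf_enabled.
	 */
	return BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
}

static inline int example_egress(struct sock *sk, struct sk_buff *skb)
{
	/* The egress wrapper additionally requires sk == skb->sk and
	 * resolves request/timewait sockets to the corresponding full
	 * socket via sk_to_full_sk() before running the program.
	 */
	return BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
}

In both cases a nonzero result means the attached program rejected the packet, and the caller is expected to drop it and propagate the error.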