/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/errno.h>
#include <linux/jump_label.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_sock_ops_kern;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

struct bpf_prog_list {
        struct list_head node;
        struct bpf_prog *prog;
};

struct bpf_prog_array;

struct cgroup_bpf {
        /* array of effective progs in this cgroup */
        struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

        /* Progs attached to this cgroup, along with their attach flags.
         * When flags is 0 or BPF_F_ALLOW_OVERRIDE, the progs list holds
         * either zero or one element; with BPF_F_ALLOW_MULTI it can hold
         * up to BPF_CGROUP_MAX_PROGS entries.
         */
        struct list_head progs[MAX_BPF_ATTACH_TYPE];
        u32 flags[MAX_BPF_ATTACH_TYPE];

        /* temp storage for effective prog array used by prog_attach/detach */
        struct bpf_prog_array __rcu *inactive;
};
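
/*
 * Illustrative sketch (not part of this header): attaching from userspace
 * is what populates the structures above.  Via the BPF_PROG_ATTACH command
 * of the bpf(2) syscall, roughly:
 *
 *      union bpf_attr attr = {};
 *
 *      attr.target_fd     = cgroup_fd; // open fd of a cgroup v2 directory
 *      attr.attach_bpf_fd = prog_fd;   // a loaded cgroup program
 *      attr.attach_type   = BPF_CGROUP_INET_EGRESS;
 *      attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *      syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 *
 * This reaches cgroup_bpf_prog_attach() and then __cgroup_bpf_attach()
 * below, which update progs[]/flags[] and recompute the effective[] arrays
 * for the subtree.  cgroup_fd and prog_fd are assumed to be fds already
 * obtained by the caller.
 */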

void cgroup_bpf_put(struct cgroup *cgrp);
int cgroup_bpf_inherit(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
                        enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
                        enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
                       union bpf_attr __user *uattr);

/* Wrappers for __cgroup_bpf_*(), protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
                      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
                      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
                     union bpf_attr __user *uattr);

int __cgroup_bpf_run_filter_skb(struct sock *sk,
                                struct sk_buff *skb,
                                enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
                               enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
                                      struct sockaddr *uaddr,
                                      enum bpf_attach_type type,
                                      void *t_ctx);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
                                     struct bpf_sock_ops_kern *sock_ops,
                                     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
                                      short access, enum bpf_attach_type type);

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled) \
                __ret = __cgroup_bpf_run_filter_skb(sk, skb, \
                                                    BPF_CGROUP_INET_INGRESS); \
\
        __ret; \
})
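
/*
 * Usage sketch (illustrative): the ingress hook runs on the receive path
 * once the destination socket is known, e.g. from an sk_filter()-style
 * helper:
 *
 *      err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
 *      if (err)
 *              return err;     // -EPERM: program rejected the packet
 */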

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled && sk && sk == skb->sk) { \
                typeof(sk) __sk = sk_to_full_sk(sk); \
                if (sk_fullsock(__sk)) \
                        __ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
                                                      BPF_CGROUP_INET_EGRESS); \
        } \
        __ret; \
})
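
/*
 * On egress, sk may be a request or timewait socket; sk_to_full_sk()
 * resolves it to the corresponding full socket, which is the one associated
 * with a cgroup.  Usage sketch (illustrative), from an
 * ip_finish_output()-style path:
 *
 *      ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
 *      if (ret) {
 *              kfree_skb(skb);
 *              return ret;
 *      }
 */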

#define BPF_CGROUP_RUN_SK_PROG(sk, type) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled) { \
                __ret = __cgroup_bpf_run_filter_sk(sk, type); \
        } \
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \
        BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \
        BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \
        BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)
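
/*
 * Usage sketch (illustrative): the SOCK_CREATE hook runs when a new inet
 * socket is created on behalf of userspace, e.g. at the end of an
 * inet_create()-style function; the POST_BIND hooks run after a successful
 * bind(), once the local address and port are fixed:
 *
 *      if (!kern) {
 *              err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk);
 *              if (err) {
 *                      sk_common_release(sk);
 *                      goto out;
 *              }
 *      }
 */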

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled) \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
                                                          NULL); \
        __ret; \
})

/* Same as above, but takes the socket lock around the program run; for
 * callers that do not already hold it.
 */
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled) { \
                lock_sock(sk); \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
                                                          t_ctx); \
                release_sock(sk); \
        } \
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
                                            (sk)->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)
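
/*
 * Usage sketch (illustrative): a protocol's ->pre_connect() handler picks
 * the variant matching its locking context.  TCP's handler is called with
 * the socket already locked and can use the plain hook; a UDP-style handler
 * is not, so it uses the _LOCK variant:
 *
 *      int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr,
 *                          int addr_len)
 *      {
 *              if (addr_len < sizeof(struct sockaddr_in))
 *                      return -EINVAL;
 *
 *              return BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr);
 *      }
 */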

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)
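
/*
 * Usage sketch (illustrative): the UDP sendmsg hooks run for sends on
 * unconnected sockets and may rewrite the destination.  t_ctx carries
 * hook-specific context; a udp_sendmsg()-style caller might pass a pointer
 * to the resolved destination address (usin, ipc and connected are assumed
 * from such a caller's scope):
 *
 *      if (cgroup_bpf_enabled && !connected) {
 *              err = BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk,
 *                                      (struct sockaddr *)usin, &ipc.addr);
 *              if (err)
 *                      goto out_free;
 *      }
 */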

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled && (sock_ops)->sk) { \
                typeof((sock_ops)->sk) __sk = sk_to_full_sk((sock_ops)->sk); \
                if (__sk && sk_fullsock(__sk)) \
                        __ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
                                                                 sock_ops, \
                                                         BPF_CGROUP_SOCK_OPS); \
        } \
        __ret; \
})
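
/*
 * Usage sketch (illustrative): TCP reaches this through a
 * tcp_call_bpf()-style helper that fills in a struct bpf_sock_ops_kern for
 * each callback event:
 *
 *      struct bpf_sock_ops_kern sock_ops;
 *
 *      memset(&sock_ops, 0, sizeof(sock_ops));
 *      sock_ops.sk = sk;
 *      sock_ops.op = op;       // e.g. BPF_SOCK_OPS_TIMEOUT_INIT
 *      ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
 */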

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled) \
                __ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
                                                          access, \
                                                          BPF_CGROUP_DEVICE); \
\
        __ret; \
})
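
/*
 * Usage sketch (illustrative): the device hook guards device-node access,
 * e.g. from a devcgroup_check_permission()-style helper:
 *
 *      rc = BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access);
 *      if (rc)
 *              return -EPERM;
 */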

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
                           enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
                           enum bpf_prog_type ptype);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
                          union bpf_attr __user *uattr);

#else /* CONFIG_CGROUP_BPF */

struct bpf_prog;
struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
                                         enum bpf_prog_type ptype,
                                         struct bpf_prog *prog)
{
        return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
                                         enum bpf_prog_type ptype)
{
        return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
                                        union bpf_attr __user *uattr)
{
        return -EINVAL;
}

#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) ({ 0; })

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */