bpf-cgroup.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/errno.h>
#include <linux/jump_label.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_sock_ops_kern;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

struct bpf_prog_list {
	struct list_head node;
	struct bpf_prog *prog;
};

struct bpf_prog_array;

struct cgroup_bpf {
	/* array of effective progs in this cgroup */
	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

	/* attached progs to this cgroup and attach flags
	 * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
	 * have either zero or one element
	 * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
	 */
	struct list_head progs[MAX_BPF_ATTACH_TYPE];
	u32 flags[MAX_BPF_ATTACH_TYPE];

	/* temp storage for effective prog array used by prog_attach/detach */
	struct bpf_prog_array __rcu *inactive;
};
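
/*
 * Example (hypothetical sketch, not part of this header): userspace
 * attaches a program to a cgroup via the bpf(2) syscall; cgroup_fd and
 * prog_fd are assumed to be already-open descriptors. With
 * BPF_F_ALLOW_MULTI the per-type progs list can grow to
 * BPF_CGROUP_MAX_PROGS entries; with flags == 0 a second attach to the
 * same (cgroup, type) pair is rejected.
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd	   = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_INGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *	if (syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr)) < 0)
 *		return -errno;
 */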
void cgroup_bpf_put(struct cgroup *cgrp);
int cgroup_bpf_inherit(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr);

/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr);

int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type);

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
						    BPF_CGROUP_INET_INGRESS); \
									      \
	__ret;								      \
})
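
/*
 * Example call site (sketch; mainline uses this pattern in
 * sk_filter_trim_cap() in net/core/filter.c): run the attached ingress
 * programs on a received skb and drop it on a non-zero return:
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
 *	if (err)
 *		return err;
 */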
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		       \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (sk_fullsock(__sk))					       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
						      BPF_CGROUP_INET_EGRESS); \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SK_PROG(sk, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled) {					       \
		__ret = __cgroup_bpf_run_filter_sk(sk, type);		       \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,    \
							  NULL);	       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx)		       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled) {					       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,    \
							  t_ctx);	       \
		release_sock(sk);					       \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
					    sk->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)
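
/*
 * Example call site (sketch, modeled on udp_sendmsg() in
 * net/ipv4/udp.c): for an unconnected UDP socket the program may
 * rewrite the destination before the route lookup; t_ctx passes a
 * hook-specific pointer through to the program's run context:
 *
 *	if (cgroup_bpf_enabled && !connected) {
 *		err = BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk,
 *					(struct sockaddr *)usin, &ipc.addr);
 *		if (err)
 *			goto out_free;
 *	}
 */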
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && (sock_ops)->sk) {			       \
		typeof((sock_ops)->sk) __sk = sk_to_full_sk((sock_ops)->sk);  \
		if (__sk && sk_fullsock(__sk))				       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
							 BPF_CGROUP_SOCK_OPS); \
	}								       \
	__ret;								       \
})
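
/*
 * Example program (hypothetical, modeled on the samples/bpf TCP
 * examples): a minimal BPF_PROG_TYPE_SOCK_OPS program that the macro
 * above would invoke, answering the BPF_SOCK_OPS_RWND_INIT callback
 * with a custom initial window via skops->reply:
 *
 *	SEC("sockops")
 *	int bpf_rwnd(struct bpf_sock_ops *skops)
 *	{
 *		if (skops->op == BPF_SOCK_OPS_RWND_INIT)
 *			skops->reply = 40;
 *		return 1;
 *	}
 */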
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access)	      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
							  access,	      \
							  BPF_CGROUP_DEVICE); \
									      \
	__ret;								      \
})
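
/*
 * Example program (hypothetical, modeled on the dev_cgroup selftest): a
 * BPF_PROG_TYPE_CGROUP_DEVICE program consulted by the macro above;
 * returning 1 allows the access, 0 denies it. The device type lives in
 * the low 16 bits of access_type, the access mask in the high 16 bits.
 * This one permits only the character device 1:3 (/dev/null):
 *
 *	SEC("cgroup/dev")
 *	int allow_dev_null(struct bpf_cgroup_dev_ctx *ctx)
 *	{
 *		short type = ctx->access_type & 0xFFFF;
 *
 *		return type == BPF_DEVCG_DEV_CHAR &&
 *		       ctx->major == 1 && ctx->minor == 3;
 *	}
 */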
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);
#else

struct bpf_prog;
struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) ({ 0; })

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */