bpf-cgroup.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/jump_label.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_sock_ops_kern;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

struct bpf_prog_list {
        struct list_head node;
        struct bpf_prog *prog;
};

struct bpf_prog_array;

struct cgroup_bpf {
        /* array of effective progs in this cgroup */
        struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

        /* attached progs to this cgroup and attach flags
         * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
         * have either zero or one element
         * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
         */
        struct list_head progs[MAX_BPF_ATTACH_TYPE];
        u32 flags[MAX_BPF_ATTACH_TYPE];

        /* temp storage for effective prog array used by prog_attach/detach */
        struct bpf_prog_array __rcu *inactive;
};
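
/*
 * Note: "effective" holds the programs that actually run for this cgroup.
 * Depending on the attach flags it is derived from this cgroup's own progs
 * list and from ancestor cgroups, and it is recomputed on attach/detach
 * using "inactive" as temporary storage.
 */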

void cgroup_bpf_put(struct cgroup *cgrp);
int cgroup_bpf_inherit(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
                        enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
                        enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
                       union bpf_attr __user *uattr);

/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
                      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
                      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
                     union bpf_attr __user *uattr);

int __cgroup_bpf_run_filter_skb(struct sock *sk,
                                struct sk_buff *skb,
                                enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
                               enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
                                      struct sockaddr *uaddr,
                                      enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
                                     struct bpf_sock_ops_kern *sock_ops,
                                     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
                                      short access, enum bpf_attach_type type);
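
/*
 * Illustrative flow (a sketch, not taken from this header): the
 * BPF_PROG_ATTACH command of the bpf(2) syscall is expected to resolve the
 * target cgroup and program and then go through the cgroup_mutex-protected
 * wrapper, e.g.:
 *
 *      ret = cgroup_bpf_attach(cgrp, prog, BPF_CGROUP_INET_INGRESS, flags);
 */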

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled) \
                __ret = __cgroup_bpf_run_filter_skb(sk, skb, \
                                                    BPF_CGROUP_INET_INGRESS); \
 \
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled && sk && sk == skb->sk) { \
                typeof(sk) __sk = sk_to_full_sk(sk); \
                if (sk_fullsock(__sk)) \
                        __ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
                                                            BPF_CGROUP_INET_EGRESS); \
        } \
        __ret; \
})
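
/*
 * Illustrative call-site sketch (not taken from this header): the
 * INET_INGRESS and INET_EGRESS wrappers are meant to be invoked from the
 * network rx/tx paths with the socket and skb in hand; a non-zero return is
 * treated as "deny" by the caller, e.g.:
 *
 *      err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
 *      if (err)
 *              return err;
 */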

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled) { \
                __ret = __cgroup_bpf_run_filter_sk(sk, \
                                                   BPF_CGROUP_INET_SOCK_CREATE); \
        } \
        __ret; \
})

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled) \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type); \
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)
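
/*
 * Illustrative sketch (not taken from this header): the bind wrappers are
 * meant to run before an address is bound, passing the user-supplied sockaddr
 * so a BPF_CGROUP_INET4_BIND/INET6_BIND program can inspect or reject it:
 *
 *      err = BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr);
 *      if (err)
 *              return err;
 */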

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled && (sock_ops)->sk) { \
                typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \
                if (__sk && sk_fullsock(__sk)) \
                        __ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
                                                                 sock_ops, \
                                                                 BPF_CGROUP_SOCK_OPS); \
        } \
        __ret; \
})
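
/*
 * Note: BPF_CGROUP_RUN_PROG_SOCK_OPS takes only @sock_ops and derives the
 * full socket from (sock_ops)->sk; typeof(sk) relies on a variable named "sk"
 * being in scope at the call site. Illustrative sketch (field names assumed,
 * not taken from this header):
 *
 *      struct bpf_sock_ops_kern sock_ops = { .sk = sk, .op = op };
 *      ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
 */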

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled) \
                __ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
                                                          access, \
                                                          BPF_CGROUP_DEVICE); \
 \
        __ret; \
})
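
/*
 * Illustrative sketch (not taken from this header): a non-zero result means a
 * BPF_CGROUP_DEVICE program denied the (type, major, minor, access) tuple,
 * which callers would typically map to -EPERM:
 *
 *      if (BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access))
 *              return -EPERM;
 */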

#else

struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }

#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */