sock_reuseport.c

// SPDX-License-Identifier: GPL-2.0
/*
 * To speed up listener socket lookup, create an array to store all sockets
 * listening on the same port. This allows a decision to be made after finding
 * the first socket. An optional BPF program can also be configured for
 * selecting the socket index from the array of available sockets.
 */
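
/*
 * For context (not part of this file): a reuseport group is created from
 * user space simply by binding multiple sockets to the same address with
 * SO_REUSEPORT set. A minimal sketch, assuming a standard Linux userland;
 * open_group_member() is a hypothetical helper name:
 *
 *	#include <netinet/in.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	// Each socket that binds with SO_REUSEPORT joins (or creates) the
 *	// group; the kernel side of that lands in reuseport_alloc() and
 *	// reuseport_add_sock() below.
 *	static int open_group_member(unsigned short port)
 *	{
 *		struct sockaddr_in addr;
 *		int one = 1;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		if (fd < 0)
 *			return -1;
 *		setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *		memset(&addr, 0, sizeof(addr));
 *		addr.sin_family = AF_INET;
 *		addr.sin_addr.s_addr = htonl(INADDR_ANY);
 *		addr.sin_port = htons(port);
 *		if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
 *			return -1;
 *		return fd;
 *	}
 */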

#include <net/sock_reuseport.h>
#include <linux/bpf.h>
#include <linux/rcupdate.h>

#define INIT_SOCKS 128

static DEFINE_SPINLOCK(reuseport_lock);

static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
	unsigned int size = sizeof(struct sock_reuseport) +
			    sizeof(struct sock *) * max_socks;
	struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);

	if (!reuse)
		return NULL;

	reuse->max_socks = max_socks;
	RCU_INIT_POINTER(reuse->prog, NULL);
	return reuse;
}

int reuseport_alloc(struct sock *sk)
{
	struct sock_reuseport *reuse;

	/* bh lock used since this function call may precede hlist lock in
	 * soft irq of receive path or setsockopt from process context
	 */
	spin_lock_bh(&reuseport_lock);

	/* Allocation attempts can occur concurrently via the setsockopt path
	 * and the bind/hash path.  Nothing to do when we lose the race.
	 */
	if (rcu_dereference_protected(sk->sk_reuseport_cb,
				      lockdep_is_held(&reuseport_lock)))
		goto out;

	reuse = __reuseport_alloc(INIT_SOCKS);
	if (!reuse) {
		spin_unlock_bh(&reuseport_lock);
		return -ENOMEM;
	}

	reuse->socks[0] = sk;
	reuse->num_socks = 1;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

out:
	spin_unlock_bh(&reuseport_lock);

	return 0;
}
EXPORT_SYMBOL(reuseport_alloc);

static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
{
	struct sock_reuseport *more_reuse;
	u32 more_socks_size, i;

	more_socks_size = reuse->max_socks * 2U;
	if (more_socks_size > U16_MAX)
		return NULL;

	more_reuse = __reuseport_alloc(more_socks_size);
	if (!more_reuse)
		return NULL;

	more_reuse->max_socks = more_socks_size;
	more_reuse->num_socks = reuse->num_socks;
	more_reuse->prog = reuse->prog;

	memcpy(more_reuse->socks, reuse->socks,
	       reuse->num_socks * sizeof(struct sock *));

	for (i = 0; i < reuse->num_socks; ++i)
		rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
				   more_reuse);

	/* Note: we use kfree_rcu here instead of reuseport_free_rcu so
	 * that reuse and more_reuse can temporarily share a reference
	 * to prog.
	 */
	kfree_rcu(reuse, rcu);
	return more_reuse;
}

static void reuseport_free_rcu(struct rcu_head *head)
{
	struct sock_reuseport *reuse;

	reuse = container_of(head, struct sock_reuseport, rcu);
	if (reuse->prog)
		bpf_prog_destroy(reuse->prog);
	kfree(reuse);
}

/**
 * reuseport_add_sock - Add a socket to the reuseport group of another.
 * @sk:  New socket to add to the group.
 * @sk2: Socket belonging to the existing reuseport group.
 * May return ENOMEM and not add socket to group under memory pressure.
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2)
{
	struct sock_reuseport *old_reuse, *reuse;

	if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
		int err = reuseport_alloc(sk2);

		if (err)
			return err;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					      lockdep_is_held(&reuseport_lock));
	if (old_reuse && old_reuse->num_socks != 1) {
		spin_unlock_bh(&reuseport_lock);
		return -EBUSY;
	}

	if (reuse->num_socks == reuse->max_socks) {
		reuse = reuseport_grow(reuse);
		if (!reuse) {
			spin_unlock_bh(&reuseport_lock);
			return -ENOMEM;
		}
	}

	reuse->socks[reuse->num_socks] = sk;
	/* paired with smp_rmb() in reuseport_select_sock() */
	smp_wmb();
	reuse->num_socks++;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	spin_unlock_bh(&reuseport_lock);

	if (old_reuse)
		call_rcu(&old_reuse->rcu, reuseport_free_rcu);
	return 0;
}
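
/*
 * A minimal sketch of a caller (hypothetical, not part of this file): a
 * protocol's bind/hash path would pass an already hashed socket found on
 * the same port as sk2, and the socket being added as sk.
 *
 *	static int example_hash_reuseport(struct sock *sk, struct sock *sk2)
 *	{
 *		if (sk2)
 *			return reuseport_add_sock(sk, sk2);
 *		// First socket on this port: create the group.
 *		return reuseport_alloc(sk);
 *	}
 */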

void reuseport_detach_sock(struct sock *sk)
{
	struct sock_reuseport *reuse;
	int i;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	rcu_assign_pointer(sk->sk_reuseport_cb, NULL);

	for (i = 0; i < reuse->num_socks; i++) {
		if (reuse->socks[i] == sk) {
			reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
			reuse->num_socks--;
			if (reuse->num_socks == 0)
				call_rcu(&reuse->rcu, reuseport_free_rcu);
			break;
		}
	}
	spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_detach_sock);

static struct sock *run_bpf(struct sock_reuseport *reuse, u16 socks,
			    struct bpf_prog *prog, struct sk_buff *skb,
			    int hdr_len)
{
	struct sk_buff *nskb = NULL;
	u32 index;

	if (skb_shared(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return NULL;
		skb = nskb;
	}

	/* temporarily advance data past protocol header */
	if (!pskb_pull(skb, hdr_len)) {
		kfree_skb(nskb);
		return NULL;
	}
	index = bpf_prog_run_save_cb(prog, skb);
	__skb_push(skb, hdr_len);

	consume_skb(nskb);

	if (index >= socks)
		return NULL;

	return reuse->socks[index];
}
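
/*
 * The program's return value is used directly as an index into socks[];
 * any value >= the current group size makes the caller fall back to hash
 * selection. A minimal classic-BPF filter (a sketch of the kind of program
 * attached from user space) that steers each packet to the socket whose
 * index matches the current CPU:
 *
 *	#include <linux/filter.h>
 *
 *	struct sock_filter cpu_filter[] = {
 *		// A = current CPU (ancillary load)
 *		{ BPF_LD | BPF_W | BPF_ABS, 0, 0, SKF_AD_OFF + SKF_AD_CPU },
 *		// return A, interpreted as the socket index
 *		{ BPF_RET | BPF_A, 0, 0, 0 },
 *	};
 */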

/**
 * reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
 * @sk: First socket in the group.
 * @hash: When no BPF filter is available, use this hash to select.
 * @skb: skb to run through BPF filter.
 * @hdr_len: BPF filter expects skb data pointer at payload data.  If
 *   the skb does not yet point at the payload, this parameter represents
 *   how far the pointer needs to advance to reach the payload.
 * Returns a socket that should receive the packet (or NULL on error).
 */
struct sock *reuseport_select_sock(struct sock *sk,
				   u32 hash,
				   struct sk_buff *skb,
				   int hdr_len)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *prog;
	struct sock *sk2 = NULL;
	u16 socks;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);

	/* if memory allocation failed or add call is not yet complete */
	if (!reuse)
		goto out;

	prog = rcu_dereference(reuse->prog);
	socks = READ_ONCE(reuse->num_socks);
	if (likely(socks)) {
		/* paired with smp_wmb() in reuseport_add_sock() */
		smp_rmb();

		if (prog && skb)
			sk2 = run_bpf(reuse, socks, prog, skb, hdr_len);
		/* no bpf or invalid bpf result: fall back to hash usage */
		if (!sk2)
			sk2 = reuse->socks[reciprocal_scale(hash, socks)];
	}

out:
	rcu_read_unlock();
	return sk2;
}
EXPORT_SYMBOL(reuseport_select_sock);
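
/*
 * For reference: reciprocal_scale(hash, socks) maps a 32-bit hash onto
 * [0, socks) without a modulo, computing ((u64)hash * socks) >> 32. For
 * example, with socks == 4, hash values in [0, 2^30) map to index 0,
 * [2^30, 2^31) to index 1, and so on, so a uniform hash spreads flows
 * evenly across the group.
 */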

struct bpf_prog *
reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_prog = rcu_dereference_protected(reuse->prog,
					     lockdep_is_held(&reuseport_lock));
	rcu_assign_pointer(reuse->prog, prog);
	spin_unlock_bh(&reuseport_lock);

	return old_prog;
}
EXPORT_SYMBOL(reuseport_attach_prog);
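
/*
 * From user space this function is reached via setsockopt() on a group
 * member. A minimal sketch, reusing the hypothetical cpu_filter from the
 * example above run_bpf():
 *
 *	#include <sys/socket.h>
 *
 *	struct sock_fprog fprog = {
 *		.len = sizeof(cpu_filter) / sizeof(cpu_filter[0]),
 *		.filter = cpu_filter,
 *	};
 *	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF,
 *		       &fprog, sizeof(fprog)) < 0)
 *		perror("SO_ATTACH_REUSEPORT_CBPF");
 *
 * The previous program, if any, is returned to the kernel-side caller so
 * it can be disposed of once the new program is visible to readers.
 */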