/*
 * NET		Generic infrastructure for Network protocols.
 *
 * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *		From code originally in include/net/tcp.h
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tcp.h>
#include <linux/vmalloc.h>

#include <net/request_sock.h>
/*
 * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
 * One SYN_RECV socket costs about 80 bytes on a 32-bit machine.
 * It would be better to replace it with a global counter for all sockets,
 * but then some measure against one socket starving all other sockets
 * would be needed.
 *
 * The minimum value of it is 128. Experiments with real servers show that
 * it is absolutely not enough even at 100 conn/sec. 256 cures most
 * of the problems.
 * This value is adjusted to 128 for low-memory machines,
 * and it will increase in proportion to the memory of the machine.
 * Note: don't forget somaxconn, which may limit the backlog too.
 */
int sysctl_max_syn_backlog = 256;
EXPORT_SYMBOL(sysctl_max_syn_backlog);
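
/*
 * Illustrative note (not from the original source): the value above only
 * bounds the per-listener SYN table built below.  The backlog a caller
 * requests is clamped against net.core.somaxconn first, roughly:
 *
 *	backlog = min(backlog, somaxconn);	// in the listen() syscall
 *	reqsk_queue_alloc(queue, backlog);	// then clamped again below
 *
 * so raising net.ipv4.tcp_max_syn_backlog alone may have little visible
 * effect unless somaxconn is raised as well.
 */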

int reqsk_queue_alloc(struct request_sock_queue *queue,
		      unsigned int nr_table_entries)
{
	size_t lopt_size = sizeof(struct listen_sock);
	struct listen_sock *lopt = NULL;

	nr_table_entries = min_t(u32, nr_table_entries, sysctl_max_syn_backlog);
	nr_table_entries = max_t(u32, nr_table_entries, 8);
	nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
	lopt_size += nr_table_entries * sizeof(struct request_sock *);
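
	/*
	 * Worked example (illustrative, assuming the default sysctl of 256):
	 * a request for 128 entries survives both clamps unchanged and is
	 * then rounded up, roundup_pow_of_two(128 + 1) = 256, so the table
	 * holds 256 request_sock pointers and lopt_size grows by
	 * 256 * sizeof(void *).  Further down, max_qlen_log = ilog2(256) = 8.
	 */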

	if (lopt_size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		lopt = kzalloc(lopt_size, GFP_KERNEL |
					  __GFP_NOWARN |
					  __GFP_NORETRY);
	if (!lopt)
		lopt = vzalloc(lopt_size);
	if (!lopt)
		return -ENOMEM;

	get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
	rwlock_init(&queue->syn_wait_lock);
	queue->rskq_accept_head = NULL;
	lopt->nr_table_entries = nr_table_entries;
	lopt->max_qlen_log = ilog2(nr_table_entries);

	write_lock_bh(&queue->syn_wait_lock);
	queue->listen_opt = lopt;
	write_unlock_bh(&queue->syn_wait_lock);

	return 0;
}
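
/*
 * Usage sketch (illustrative, not part of this file): the listen() path is
 * the usual caller, e.g. from inet_csk_listen_start(), roughly:
 *
 *	int rc = reqsk_queue_alloc(&inet_csk(sk)->icsk_accept_queue, backlog);
 *
 *	if (rc != 0)
 *		return rc;
 *	... move the socket to TCP_LISTEN and hash it ...
 */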

void __reqsk_queue_destroy(struct request_sock_queue *queue)
{
	/* This is an error recovery path only, no locking needed */
	kvfree(queue->listen_opt);
}

static inline struct listen_sock *reqsk_queue_yank_listen_sk(
	struct request_sock_queue *queue)
{
	struct listen_sock *lopt;

	write_lock_bh(&queue->syn_wait_lock);
	lopt = queue->listen_opt;
	queue->listen_opt = NULL;
	write_unlock_bh(&queue->syn_wait_lock);

	return lopt;
}

void reqsk_queue_destroy(struct request_sock_queue *queue)
{
	/* make all the listen_opt local to us */
	struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue);

	if (lopt->qlen != 0) {
		unsigned int i;

		for (i = 0; i < lopt->nr_table_entries; i++) {
			struct request_sock *req;

			while ((req = lopt->syn_table[i]) != NULL) {
				lopt->syn_table[i] = req->dl_next;
				lopt->qlen--;
				reqsk_free(req);
			}
		}
	}

	WARN_ON(lopt->qlen != 0);
	kvfree(lopt);
}

/*
 * This function is called to set a Fast Open socket's "fastopen_rsk" field
 * to NULL when a TFO socket no longer needs to access the request_sock.
 * This happens only after 3WHS has been either completed or aborted (e.g.,
 * RST is received).
 *
 * Before TFO, a child socket is created only after 3WHS is completed,
 * hence it never needs to access the request_sock. Things get a lot more
 * complex with TFO. A child socket, accepted or not, has to access its
 * request_sock for 3WHS processing, e.g., to retransmit SYN-ACK pkts,
 * until 3WHS is either completed or aborted. Afterwards the req will stay
 * until either the child socket is accepted, or in the rare case when the
 * listener is closed before the child is accepted.
 *
 * In short, a request socket is only freed after BOTH 3WHS has completed
 * (or aborted) and the child socket has been accepted (or listener closed).
 * When a child socket is accepted, its corresponding req->sk is set to
 * NULL since it's no longer needed. More importantly, "req->sk == NULL"
 * will be used by the code below to determine if a child socket has been
 * accepted or not, and the check is protected by the fastopenq->lock
 * described below.
 *
 * Note that fastopen_rsk is only accessed from the child socket's context
 * with its socket lock held. But a request_sock (req) can be accessed by
 * both its child socket through fastopen_rsk, and a listener socket through
 * icsk_accept_queue.rskq_accept_head. To protect the access a simple spin
 * lock per listener "icsk->icsk_accept_queue.fastopenq->lock" is created.
 * Only in the rare case when both the listener and the child locks are held,
 * e.g., in inet_csk_listen_stop(), do we not need to acquire the lock.
 * The lock also protects other fields such as fastopenq->qlen, which is
 * decremented by this function when fastopen_rsk is no longer needed.
 *
 * Note that another solution was to simply use the existing socket lock
 * from the listener. But first the socket lock is difficult to use. It is
 * not a simple spin lock - one must consider sock_owned_by_user() and
 * arrange to use sk_add_backlog() stuff. But what really makes it
 * infeasible is the locking hierarchy violation. E.g., inet_csk_listen_stop()
 * may try to acquire a child's lock while holding the listener's socket
 * lock. A corner case might also exist in tcp_v4_hnd_req() that will
 * trigger this locking order.
 *
 * When a TFO req is created, it needs to sock_hold its listener to prevent
 * the latter data structure from going away.
 *
 * This function also sets "treq->listener" to NULL and unreferences the
 * listener socket. treq->listener is used by the listener so it is
 * protected by the fastopenq->lock in this function.
 */
void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
			   bool reset)
{
	struct sock *lsk = tcp_rsk(req)->listener;
	struct fastopen_queue *fastopenq =
	    inet_csk(lsk)->icsk_accept_queue.fastopenq;

	tcp_sk(sk)->fastopen_rsk = NULL;
	spin_lock_bh(&fastopenq->lock);
	fastopenq->qlen--;
	tcp_rsk(req)->listener = NULL;
	if (req->sk)	/* the child socket hasn't been accepted yet */
		goto out;

	if (!reset || lsk->sk_state != TCP_LISTEN) {
		/* If the listener has been closed don't bother with the
		 * special RST handling below.
		 */
		spin_unlock_bh(&fastopenq->lock);
		sock_put(lsk);
		reqsk_free(req);
		return;
	}
	/* Wait for 60 seconds before removing a req that has triggered RST.
	 * This is a simple defense against a TFO spoofing attack - by
	 * counting the req against fastopen.max_qlen, and disabling
	 * TFO when the qlen exceeds max_qlen.
	 *
	 * For more details see CoNEXT'11 "TCP Fast Open" paper.
	 */
	req->expires = jiffies + 60*HZ;
	if (fastopenq->rskq_rst_head == NULL)
		fastopenq->rskq_rst_head = req;
	else
		fastopenq->rskq_rst_tail->dl_next = req;

	req->dl_next = NULL;
	fastopenq->rskq_rst_tail = req;
	fastopenq->qlen++;
out:
	spin_unlock_bh(&fastopenq->lock);
	sock_put(lsk);
}
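
/*
 * Usage sketch (illustrative, not part of this file): the child socket's
 * teardown path is a typical caller, roughly:
 *
 *	struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
 *
 *	if (req != NULL)
 *		reqsk_fastopen_remove(sk, req, false);
 *
 * The reset argument indicates whether the child is being torn down by an
 * incoming RST, which selects the 60-second RST-defense path described in
 * the comment inside reqsk_fastopen_remove() above.
 */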