/*
 * NET		Generic infrastructure for Network protocols.
 *
 *		Definitions for request_sock
 *
 * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *		From code originally in include/net/tcp.h
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#ifndef _REQUEST_SOCK_H
#define _REQUEST_SOCK_H

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/bug.h>

#include <net/sock.h>

struct request_sock;
struct sk_buff;
struct dst_entry;
struct proto;

struct request_sock_ops {
	int		family;
	int		obj_size;
	struct kmem_cache	*slab;
	char		*slab_name;
	int		(*rtx_syn_ack)(const struct sock *sk,
				       struct request_sock *req);
	void		(*send_ack)(const struct sock *sk, struct sk_buff *skb,
				    struct request_sock *req);
	void		(*send_reset)(const struct sock *sk,
				      struct sk_buff *skb);
	void		(*destructor)(struct request_sock *req);
	void		(*syn_ack_timeout)(const struct request_sock *req);
};

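/*
 * Illustrative sketch (not part of this header): each protocol supplies
 * its own ops table.  TCP's IPv4 table, for instance, looks roughly like
 * this (see net/ipv4/tcp_ipv4.c; exact members may differ by kernel
 * version, and ->slab/->slab_name are filled in at proto registration):
 *
 *	struct request_sock_ops tcp_request_sock_ops __read_mostly = {
 *		.family		 = PF_INET,
 *		.obj_size	 = sizeof(struct tcp_request_sock),
 *		.rtx_syn_ack	 = tcp_rtx_synack,
 *		.send_ack	 = tcp_v4_reqsk_send_ack,
 *		.destructor	 = tcp_v4_reqsk_destructor,
 *		.send_reset	 = tcp_v4_send_reset,
 *		.syn_ack_timeout = tcp_syn_ack_timeout,
 *	};
 */
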
int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);

/* struct request_sock - mini sock to represent a connection request
 */
struct request_sock {
	struct sock_common		__req_common;
#define rsk_refcnt			__req_common.skc_refcnt
#define rsk_hash			__req_common.skc_hash

	struct request_sock		*dl_next;
	struct sock			*rsk_listener;
	u16				mss;
	u8				num_retrans; /* number of retransmits */
	u8				cookie_ts:1; /* syncookie: encode tcpopts in timestamp */
	u8				num_timeout:7; /* number of timeouts */
	/* The following two fields can be easily recomputed I think -AK */
	u32				window_clamp; /* window clamp at creation time */
	u32				rcv_wnd;	  /* rcv_wnd offered first time */
	u32				ts_recent;
	struct timer_list		rsk_timer;
	const struct request_sock_ops	*rsk_ops;
	struct sock			*sk;
	u32				*saved_syn;
	u32				secid;
	u32				peer_secid;
};

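/*
 * struct request_sock and struct sock both begin with struct sock_common,
 * so the casts below are layout-safe in either direction.
 */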
static inline struct request_sock *inet_reqsk(struct sock *sk)
{
	return (struct request_sock *)sk;
}

static inline struct sock *req_to_sk(struct request_sock *req)
{
	return (struct sock *)req;
}

static inline struct request_sock *
reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener)
{
	struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC);

	if (req) {
		req->rsk_ops = ops;
		sock_hold(sk_listener);
		req->rsk_listener = sk_listener;
		req_to_sk(req)->sk_prot = sk_listener->sk_prot;
		sk_node_init(&req_to_sk(req)->sk_node);
		req->saved_syn = NULL;
		/* Following is temporary. It is coupled with debugging
		 * helpers in reqsk_put() & reqsk_free()
		 */
		atomic_set(&req->rsk_refcnt, 0);
	}
	return req;
}

static inline void reqsk_free(struct request_sock *req)
{
	/* temporary debugging */
	WARN_ON_ONCE(atomic_read(&req->rsk_refcnt) != 0);

	req->rsk_ops->destructor(req);
	if (req->rsk_listener)
		sock_put(req->rsk_listener);
	kfree(req->saved_syn);
	kmem_cache_free(req->rsk_ops->slab, req);
}

static inline void reqsk_put(struct request_sock *req)
{
	if (atomic_dec_and_test(&req->rsk_refcnt))
		reqsk_free(req);
}

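/*
 * Lifecycle sketch (illustrative; the TCP SYN path does this with more
 * locking, error handling and an extra reference for the hash table).
 * reqsk_alloc() leaves rsk_refcnt at 0, so the caller must set it before
 * publishing the request:
 *
 *	struct request_sock *req = reqsk_alloc(ops, listener);
 *
 *	if (req) {
 *		atomic_set(&req->rsk_refcnt, 1);
 *		... queue req, arm rsk_timer, send the SYN-ACK ...
 *		reqsk_put(req);		// last ref -> reqsk_free()
 *	}
 */
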
extern int sysctl_max_syn_backlog;

/*
 * For a TCP Fast Open listener -
 *	lock - protects the access to all the reqsk, which is co-owned by
 *		the listener and the child socket.
 *	qlen - pending TFO requests (still in TCP_SYN_RECV).
 *	max_qlen - max TFO reqs allowed before TFO is disabled.
 *
 *	XXX (TFO) - ideally these fields can be made as part of "listen_sock"
 *	structure above. But there is some implementation difficulty due to
 *	listen_sock being part of request_sock_queue hence will be freed when
 *	a listener is stopped. But TFO related fields may continue to be
 *	accessed even after a listener is closed, until its sk_refcnt drops
 *	to 0 implying no more outstanding TFO reqs. One solution is to keep
 *	listen_opt around until sk_refcnt drops to 0. But there is some other
 *	complexity that needs to be resolved. E.g., a listener can be disabled
 *	temporarily through shutdown()->tcp_disconnect(), and re-enabled later.
 */
struct fastopen_queue {
	struct request_sock	*rskq_rst_head; /* Keep track of past TFO */
	struct request_sock	*rskq_rst_tail; /* requests that caused RST.
						 * This is part of the defense
						 * against spoofing attack.
						 */
	spinlock_t	lock;
	int		qlen;		/* # of pending (TCP_SYN_RECV) reqs */
	int		max_qlen;	/* != 0 iff TFO is currently enabled */
};

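/*
 * Sketch (illustrative): since max_qlen != 0 is the "TFO enabled" flag,
 * SYN processing gates Fast Open on it before touching the rest of the
 * queue, along the lines of:
 *
 *	if (queue->fastopenq.max_qlen == 0)
 *		return;		// plain three-way handshake only
 */
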
/** struct request_sock_queue - queue of request_socks
 *
 * @rskq_accept_head - FIFO head of established children
 * @rskq_accept_tail - FIFO tail of established children
 * @rskq_defer_accept - User waits for some data after accept()
 *
 */
struct request_sock_queue {
	spinlock_t		rskq_lock;
	u8			rskq_defer_accept;

	u32			synflood_warned;
	atomic_t		qlen;
	atomic_t		young;

	struct request_sock	*rskq_accept_head;
	struct request_sock	*rskq_accept_tail;
	struct fastopen_queue	fastopenq;  /* Check max_qlen != 0 to determine
					     * if TFO is enabled.
					     */
};

void reqsk_queue_alloc(struct request_sock_queue *queue);

void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
			   bool reset);

static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
{
	return queue->rskq_accept_head == NULL;
}

static inline void reqsk_queue_add(struct request_sock_queue *queue,
				   struct request_sock *req,
				   struct sock *parent,
				   struct sock *child)
{
	spin_lock(&queue->rskq_lock);
	req->sk = child;
	sk_acceptq_added(parent);

	if (queue->rskq_accept_head == NULL)
		queue->rskq_accept_head = req;
	else
		queue->rskq_accept_tail->dl_next = req;

	queue->rskq_accept_tail = req;
	req->dl_next = NULL;
	spin_unlock(&queue->rskq_lock);
}

static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
						      struct sock *parent)
{
	struct request_sock *req;

	spin_lock_bh(&queue->rskq_lock);
	req = queue->rskq_accept_head;
	if (req) {
		sk_acceptq_removed(parent);
		queue->rskq_accept_head = req->dl_next;
		if (queue->rskq_accept_head == NULL)
			queue->rskq_accept_tail = NULL;
	}
	spin_unlock_bh(&queue->rskq_lock);
	return req;
}

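/*
 * Accept-path sketch (illustrative; see inet_csk_accept() in
 * net/ipv4/inet_connection_sock.c for the real thing, which also handles
 * Fast Open and blocking callers):
 *
 *	struct request_sock *req = reqsk_queue_remove(queue, parent);
 *	struct sock *child = NULL;
 *
 *	if (req) {
 *		child = req->sk;
 *		reqsk_put(req);		// drop the accept queue's reference
 *	}
 *	... return child, or block / -EAGAIN when NULL ...
 */
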
static inline void reqsk_queue_removed(struct request_sock_queue *queue,
				       const struct request_sock *req)
{
	if (req->num_timeout == 0)
		atomic_dec(&queue->young);
	atomic_dec(&queue->qlen);
}

static inline void reqsk_queue_added(struct request_sock_queue *queue)
{
	atomic_inc(&queue->young);
	atomic_inc(&queue->qlen);
}

static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{
	return atomic_read(&queue->qlen);
}

static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
	return atomic_read(&queue->young);
}

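/*
 * qlen is what listener overflow checks compare against the configured
 * backlog, e.g. (sketch, modelled on inet_csk_reqsk_queue_is_full()):
 *
 *	if (reqsk_queue_len(queue) >= sk->sk_max_ack_backlog)
 *		... drop the SYN, or fall back to syncookies ...
 */
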
#endif /* _REQUEST_SOCK_H */