
/*
 *  net/dccp/minisocks.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/dccp.h>
#include <linux/skbuff.h>
#include <linux/timer.h>

#include <net/sock.h>
#include <net/xfrm.h>
#include <net/inet_timewait_sock.h>

#include "ccid.h"
#include "dccp.h"
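
/*
 * TIME_WAIT bookkeeping shared with the generic inet timewait code: an
 * upper bound on the number of timewait buckets, the slow "hangman" timer
 * (with its work struct for batched kills), and a finer-grained calendar
 * timer used for timeouts shorter than one hangman period.
 */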
struct inet_timewait_death_row dccp_death_row = {
        .sysctl_max_tw_buckets = NR_FILE * 2,
        .period         = DCCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
        .death_lock     = SPIN_LOCK_UNLOCKED,
        .hashinfo       = &dccp_hashinfo,
        .tw_timer       = TIMER_INITIALIZER(inet_twdr_hangman, 0,
                                            (unsigned long)&dccp_death_row),
        .twkill_work    = __WORK_INITIALIZER(dccp_death_row.twkill_work,
                                             inet_twdr_twkill_work,
                                             &dccp_death_row),
        /* Short-time timewait calendar */
        .twcal_hand     = -1,
        .twcal_timer    = TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
                                            (unsigned long)&dccp_death_row),
};
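
/*
 * Move @sk into the timewait state @state.  When an inet_timewait_sock can
 * be allocated it takes the socket's place in the hash tables and is
 * scheduled on dccp_death_row; the timeout used is at least
 * (rto << 2) - (rto >> 1), i.e. 3.5 * RTO, and the full DCCP_TIMEWAIT_LEN
 * when entering TIME_WAIT proper.  If allocation fails the socket is
 * simply closed.  dccp_done() is called in either case.
 */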
void dccp_time_wait(struct sock *sk, int state, int timeo)
{
        struct inet_timewait_sock *tw = NULL;

        if (dccp_death_row.tw_count < dccp_death_row.sysctl_max_tw_buckets)
                tw = inet_twsk_alloc(sk, state);

        if (tw != NULL) {
                const struct inet_connection_sock *icsk = inet_csk(sk);
                const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);

                /* Linkage updates. */
                __inet_twsk_hashdance(tw, sk, &dccp_hashinfo);

                /* Get the TIME_WAIT timeout firing. */
                if (timeo < rto)
                        timeo = rto;

                tw->tw_timeout = DCCP_TIMEWAIT_LEN;
                if (state == DCCP_TIME_WAIT)
                        timeo = DCCP_TIMEWAIT_LEN;

                inet_twsk_schedule(tw, &dccp_death_row, timeo,
                                   DCCP_TIMEWAIT_LEN);
                inet_twsk_put(tw);
        } else {
                /* Sorry, if we're out of memory, just CLOSE this
                 * socket up.  We've got bigger problems than
                 * non-graceful socket closings.
                 */
                LIMIT_NETDEBUG(KERN_INFO "DCCP: time wait bucket "
                                         "table overflow\n");
        }

        dccp_done(sk);
}
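
/*
 * Build the child socket for an incoming connection request ("Step 3:
 * Process LISTEN state" in the DCCP specification): clone the listening
 * socket, allocate the child's ack vector if ack vectors are in use, take
 * extra references on the CCIDs shared with the parent, and seed the
 * child's sequence-number state from the request_sock.  Returns NULL if
 * cloning, ack vector allocation or CCID initialisation fails.
 */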
struct sock *dccp_create_openreq_child(struct sock *sk,
                                       const struct request_sock *req,
                                       const struct sk_buff *skb)
{
        /*
         * Step 3: Process LISTEN state
         *
         *   // Generate a new socket and switch to that socket
         *   Set S := new socket for this port pair
         */
        struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);

        if (newsk != NULL) {
                const struct dccp_request_sock *dreq = dccp_rsk(req);
                struct inet_connection_sock *newicsk = inet_csk(newsk);
                struct dccp_sock *newdp = dccp_sk(newsk);

                newdp->dccps_hc_rx_ackpkts = NULL;
                newdp->dccps_role = DCCP_ROLE_SERVER;
                newicsk->icsk_rto = DCCP_TIMEOUT_INIT;

                if (newdp->dccps_options.dccpo_send_ack_vector) {
                        newdp->dccps_hc_rx_ackpkts =
                                dccp_ackpkts_alloc(DCCP_MAX_ACK_VECTOR_LEN,
                                                   GFP_ATOMIC);
                        /*
                         * XXX: We're using the same CCIDs set on the parent,
                         * i.e. sk_clone copied the master sock and left the
                         * CCID pointers for this child, that is why we do the
                         * __ccid_get calls.
                         */
                        if (unlikely(newdp->dccps_hc_rx_ackpkts == NULL))
                                goto out_free;
                }

                if (unlikely(ccid_hc_rx_init(newdp->dccps_hc_rx_ccid,
                                             newsk) != 0 ||
                             ccid_hc_tx_init(newdp->dccps_hc_tx_ccid,
                                             newsk) != 0)) {
                        dccp_ackpkts_free(newdp->dccps_hc_rx_ackpkts);
                        ccid_hc_rx_exit(newdp->dccps_hc_rx_ccid, newsk);
                        ccid_hc_tx_exit(newdp->dccps_hc_tx_ccid, newsk);
out_free:
                        /* It is still raw copy of parent, so invalidate
                         * destructor and make plain sk_free() */
                        newsk->sk_destruct = NULL;
                        sk_free(newsk);
                        return NULL;
                }

                __ccid_get(newdp->dccps_hc_rx_ccid);
                __ccid_get(newdp->dccps_hc_tx_ccid);

                /*
                 * Step 3: Process LISTEN state
                 *
                 *   Choose S.ISS (initial seqno) or set from Init Cookie
                 *   Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init
                 *   Cookie
                 */
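
                /*
                 * In the spec's terminology, ISS/ISR are the initial
                 * sequence numbers sent/received on this connection,
                 * GSS/GSR the greatest sequence numbers sent/received so
                 * far, and SWL/SWH the low/high ends of the window of
                 * acceptable incoming sequence numbers.
                 */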
                /* See dccp_v4_conn_request */
                newdp->dccps_options.dccpo_sequence_window = req->rcv_wnd;

                newdp->dccps_gar = newdp->dccps_isr = dreq->dreq_isr;
                dccp_update_gsr(newsk, dreq->dreq_isr);

                newdp->dccps_iss = dreq->dreq_iss;
                dccp_update_gss(newsk, dreq->dreq_iss);

                dccp_init_xmit_timers(newsk);

                DCCP_INC_STATS_BH(DCCP_MIB_PASSIVEOPENS);
        }
        return newsk;
}

/*
 * Process an incoming packet for RESPOND sockets represented as a
 * request_sock.
 */
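/*
 * Returns the new child socket when a valid Ack or DataAck completes the
 * three-way handshake.  A retransmitted Request only triggers another
 * Response (or is discarded as a duplicate) and returns NULL; an
 * unexpected packet type, a bad acknowledgement number or a failure to
 * create the child (listen overflow) also return NULL, but additionally
 * send a Reset and drop the request.
 */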
struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
                            struct request_sock *req,
                            struct request_sock **prev)
{
        struct sock *child = NULL;

        /* Check for retransmitted REQUEST */
        if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {
                if (after48(DCCP_SKB_CB(skb)->dccpd_seq,
                            dccp_rsk(req)->dreq_isr)) {
                        struct dccp_request_sock *dreq = dccp_rsk(req);

                        dccp_pr_debug("Retransmitted REQUEST\n");
                        /* Send another RESPONSE packet */
                        dccp_set_seqno(&dreq->dreq_iss, dreq->dreq_iss + 1);
                        dccp_set_seqno(&dreq->dreq_isr,
                                       DCCP_SKB_CB(skb)->dccpd_seq);
                        req->rsk_ops->rtx_syn_ack(sk, req, NULL);
                }
                /* Network Duplicate, discard packet */
                return NULL;
        }

        DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;

        if (dccp_hdr(skb)->dccph_type != DCCP_PKT_ACK &&
            dccp_hdr(skb)->dccph_type != DCCP_PKT_DATAACK)
                goto drop;

        /* Invalid ACK */
        if (DCCP_SKB_CB(skb)->dccpd_ack_seq != dccp_rsk(req)->dreq_iss) {
                dccp_pr_debug("Invalid ACK number: ack_seq=%llu, "
                              "dreq_iss=%llu\n",
                              (unsigned long long)
                              DCCP_SKB_CB(skb)->dccpd_ack_seq,
                              (unsigned long long)
                              dccp_rsk(req)->dreq_iss);
                goto drop;
        }

        child = dccp_v4_request_recv_sock(sk, skb, req, NULL);
        if (child == NULL)
                goto listen_overflow;

        /* FIXME: deal with options */

        inet_csk_reqsk_queue_unlink(sk, req, prev);
        inet_csk_reqsk_queue_removed(sk, req);
        inet_csk_reqsk_queue_add(sk, req, child);
out:
        return child;
listen_overflow:
        dccp_pr_debug("listen_overflow!\n");
        DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
drop:
        if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
                req->rsk_ops->send_reset(skb);

        inet_csk_reqsk_queue_drop(sk, req, prev);
        goto out;
}

/*
 * Queue segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 */
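/*
 * Called with the child socket bh-locked and with a reference held; both
 * the lock and the reference are released before returning.  The listening
 * parent is woken via sk_data_ready() once the child leaves the RESPOND
 * state, so that accept() can pick it up.
 */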
int dccp_child_process(struct sock *parent, struct sock *child,
                       struct sk_buff *skb)
{
        int ret = 0;
        const int state = child->sk_state;

        if (!sock_owned_by_user(child)) {
                ret = dccp_rcv_state_process(child, skb, dccp_hdr(skb),
                                             skb->len);

                /* Wakeup parent, send SIGIO */
                if (state == DCCP_RESPOND && child->sk_state != state)
                        parent->sk_data_ready(parent, 0);
        } else {
                /* Alas, it is possible again, because we do lookup
                 * in main socket hash table and lock on listening
                 * socket does not protect us more.
                 */
                sk_add_backlog(child, skb);
        }

        bh_unlock_sock(child);
        sock_put(child);
        return ret;
}