/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>

#ifdef INET_CSK_DEBUG
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#endif
void inet_get_local_port_range(struct net *net, int *low, int *high)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);

		*low = net->ipv4.ip_local_ports.range[0];
		*high = net->ipv4.ip_local_ports.range[1];
	} while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);
int inet_csk_bind_conflict(const struct sock *sk,
			   const struct inet_bind_bucket *tb, bool relax)
{
	struct sock *sk2;
	int reuse = sk->sk_reuse;
	int reuseport = sk->sk_reuseport;
	kuid_t uid = sock_i_uid((struct sock *)sk);

	/*
	 * Unlike other sk lookup places we do not check
	 * for sk_net here, since _all_ the socks listed
	 * in tb->owners list belong to the same net - the
	 * one this bucket belongs to.
	 */

	sk_for_each_bound(sk2, &tb->owners) {
		if (sk != sk2 &&
		    !inet_v6_ipv6only(sk2) &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			if ((!reuse || !sk2->sk_reuse ||
			     sk2->sk_state == TCP_LISTEN) &&
			    (!reuseport || !sk2->sk_reuseport ||
			     (sk2->sk_state != TCP_TIME_WAIT &&
			      !uid_eq(uid, sock_i_uid(sk2))))) {
				if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
				    sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
					break;
			}
			if (!relax && reuse && sk2->sk_reuse &&
			    sk2->sk_state != TCP_LISTEN) {
				if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
				    sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
					break;
			}
		}
	}
	return sk2 != NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);
/* Obtain a reference to a local port for the given sock,
 * if snum is zero it means select any available local port.
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_bind_hashbucket *head;
	struct inet_bind_bucket *tb;
	int ret, attempts = 5;
	struct net *net = sock_net(sk);
	int smallest_size = -1, smallest_rover;
	kuid_t uid = sock_i_uid(sk);

	local_bh_disable();
	if (!snum) {
		int remaining, rover, low, high;

again:
		inet_get_local_port_range(net, &low, &high);
		remaining = (high - low) + 1;
		smallest_rover = rover = prandom_u32() % remaining + low;

		smallest_size = -1;
		do {
			if (inet_is_local_reserved_port(net, rover))
				goto next_nolock;
			head = &hashinfo->bhash[inet_bhashfn(net, rover,
					hashinfo->bhash_size)];
			spin_lock(&head->lock);
			inet_bind_bucket_for_each(tb, &head->chain)
				if (net_eq(ib_net(tb), net) && tb->port == rover) {
					if (((tb->fastreuse > 0 &&
					      sk->sk_reuse &&
					      sk->sk_state != TCP_LISTEN) ||
					     (tb->fastreuseport > 0 &&
					      sk->sk_reuseport &&
					      uid_eq(tb->fastuid, uid))) &&
					    (tb->num_owners < smallest_size || smallest_size == -1)) {
						smallest_size = tb->num_owners;
						smallest_rover = rover;
						if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
						    !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
							snum = smallest_rover;
							goto tb_found;
						}
					}
					if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
						snum = rover;
						goto tb_found;
					}
					goto next;
				}
			break;
		next:
			spin_unlock(&head->lock);
		next_nolock:
			if (++rover > high)
				rover = low;
		} while (--remaining > 0);

		/* Exhausted local port range during search?  It is not
		 * possible for us to be holding one of the bind hash
		 * locks if this test triggers, because if 'remaining'
		 * drops to zero, we broke out of the do/while loop at
		 * the top level, not from the 'break;' statement.
		 */
		ret = 1;
		if (remaining <= 0) {
			if (smallest_size != -1) {
				snum = smallest_rover;
				goto have_snum;
			}
			goto fail;
		}
		/* OK, here is the one we will use.  HEAD is
		 * non-NULL and we hold its mutex.
		 */
		snum = rover;
	} else {
have_snum:
		head = &hashinfo->bhash[inet_bhashfn(net, snum,
				hashinfo->bhash_size)];
		spin_lock(&head->lock);
		inet_bind_bucket_for_each(tb, &head->chain)
			if (net_eq(ib_net(tb), net) && tb->port == snum)
				goto tb_found;
	}
	tb = NULL;
	goto tb_not_found;
tb_found:
	if (!hlist_empty(&tb->owners)) {
		if (sk->sk_reuse == SK_FORCE_REUSE)
			goto success;

		if (((tb->fastreuse > 0 &&
		      sk->sk_reuse && sk->sk_state != TCP_LISTEN) ||
		     (tb->fastreuseport > 0 &&
		      sk->sk_reuseport && uid_eq(tb->fastuid, uid))) &&
		    smallest_size == -1) {
			goto success;
		} else {
			ret = 1;
			if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) {
				if (((sk->sk_reuse && sk->sk_state != TCP_LISTEN) ||
				     (tb->fastreuseport > 0 &&
				      sk->sk_reuseport && uid_eq(tb->fastuid, uid))) &&
				    smallest_size != -1 && --attempts >= 0) {
					spin_unlock(&head->lock);
					goto again;
				}

				goto fail_unlock;
			}
		}
	}
tb_not_found:
	ret = 1;
	if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep,
					net, head, snum)) == NULL)
		goto fail_unlock;
	if (hlist_empty(&tb->owners)) {
		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
			tb->fastreuse = 1;
		else
			tb->fastreuse = 0;
		if (sk->sk_reuseport) {
			tb->fastreuseport = 1;
			tb->fastuid = uid;
		} else
			tb->fastreuseport = 0;
	} else {
		if (tb->fastreuse &&
		    (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
			tb->fastreuse = 0;
		if (tb->fastreuseport &&
		    (!sk->sk_reuseport || !uid_eq(tb->fastuid, uid)))
			tb->fastreuseport = 0;
	}
success:
	if (!inet_csk(sk)->icsk_bind_hash)
		inet_bind_hash(sk, tb, snum);
	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
	ret = 0;

fail_unlock:
	spin_unlock(&head->lock);
fail:
	local_bh_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);
/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		sched_annotate_sleep();
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}
/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct sock *newsk;
	struct request_sock *req;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(queue)) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non blocking socket don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}
	req = reqsk_queue_remove(queue);
	newsk = req->sk;

	sk_acceptq_removed(sk);
	if (sk->sk_protocol == IPPROTO_TCP && queue->fastopenq != NULL) {
		spin_lock_bh(&queue->fastopenq->lock);
		if (tcp_rsk(req)->listener) {
			/* We are still waiting for the final ACK from 3WHS
			 * so can't free req now. Instead, we set req->sk to
			 * NULL to signify that the child socket is taken
			 * so reqsk_fastopen_remove() will free the req
			 * when 3WHS finishes (or is aborted).
			 */
			req->sk = NULL;
			req = NULL;
		}
		spin_unlock_bh(&queue->fastopenq->lock);
	}
out:
	release_sock(sk);
	if (req)
		__reqsk_free(req);
	return newsk;
out_err:
	newsk = NULL;
	req = NULL;
	*err = error;
	goto out;
}
EXPORT_SYMBOL(inet_csk_accept);
/*
 * Using different timers for retransmit, delayed acks and probes.
 * We may wish to use just one timer maintaining a list of expire jiffies
 * to optimize.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(unsigned long),
			       void (*delack_handler)(unsigned long),
			       void (*keepalive_handler)(unsigned long))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,
			(unsigned long)sk);
	setup_timer(&icsk->icsk_delack_timer, delack_handler,
			(unsigned long)sk);
	setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk);
	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);
void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
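
/* Build a route for the SYN-ACK of a pending connection request.  The flow
 * is derived from the request_sock rather than a full socket; if the client
 * supplied an IP source route option (SRR), the option's first-hop address
 * overrides the remote address in the flow.
 */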
struct dst_entry *inet_csk_route_req(struct sock *sk,
				     struct flowi4 *fl4,
				     const struct request_sock *req)
{
	struct rtable *rt;
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct ip_options_rcu *opt = inet_rsk(req)->opt;
	struct net *net = sock_net(sk);
	int flags = inet_sk_flowi_flags(sk);

	flowi4_init_output(fl4, sk->sk_bound_dev_if, ireq->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol,
			   flags,
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port, inet_sk(sk)->inet_sport);
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);
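
/* Like inet_csk_route_req(), but for the newly created child socket of an
 * accepted connection: the flow is stored in the child's cork so the route
 * stays cached, and the child's IP options are read under rcu_read_lock()
 * since they are RCU-managed.
 */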
struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
					    struct sock *newsk,
					    const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct inet_sock *newinet = inet_sk(newsk);
	struct ip_options_rcu *opt;
	struct net *net = sock_net(sk);
	struct flowi4 *fl4;
	struct rtable *rt;

	fl4 = &newinet->cork.fl.u.ip4;

	rcu_read_lock();
	opt = rcu_dereference(newinet->inet_opt);
	flowi4_init_output(fl4, sk->sk_bound_dev_if, inet_rsk(req)->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port, inet_sk(sk)->inet_sport);
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	rcu_read_unlock();
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	rcu_read_unlock();
	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);
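
/* Hash a (remote address, remote port) pair into a listener's SYN table
 * slot.  synq_hsize is a power of two, so the jhash result is reduced with
 * a simple mask.
 */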
static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
				 const u32 rnd, const u32 synq_hsize)
{
	return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
}

#if IS_ENABLED(CONFIG_IPV6)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) 1
#endif
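
/* Walk the SYN table chain for (rport, raddr, laddr) and return the matching
 * request_sock, if any.  On a match, *prevp is set to the link that points
 * at it, so the caller can unlink the entry from the singly linked chain.
 */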
struct request_sock *inet_csk_search_req(const struct sock *sk,
					 struct request_sock ***prevp,
					 const __be16 rport, const __be32 raddr,
					 const __be32 laddr)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req, **prev;

	for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd,
						    lopt->nr_table_entries)];
	     (req = *prev) != NULL;
	     prev = &req->dl_next) {
		const struct inet_request_sock *ireq = inet_rsk(req);

		if (ireq->ir_rmt_port == rport &&
		    ireq->ir_rmt_addr == raddr &&
		    ireq->ir_loc_addr == laddr &&
		    AF_INET_FAMILY(req->rsk_ops->family)) {
			WARN_ON(req->sk);
			*prevp = prev;
			break;
		}
	}

	return req;
}
EXPORT_SYMBOL_GPL(inet_csk_search_req);
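
/* Insert a new connection request into the listener's SYN table with the
 * given timeout, and account for it in the accept queue's qlen/qlen_young
 * counters via inet_csk_reqsk_queue_added().
 */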
void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = inet_synq_hash(inet_rsk(req)->ir_rmt_addr,
				     inet_rsk(req)->ir_rmt_port,
				     lopt->hash_rnd, lopt->nr_table_entries);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
/* Only thing we need from tcp.h */
extern int sysctl_tcp_synack_retries;

/* Decide when to expire the request and when to resend SYN-ACK */
static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
				  const int max_retries,
				  const u8 rskq_defer_accept,
				  int *expire, int *resend)
{
	if (!rskq_defer_accept) {
		*expire = req->num_timeout >= thresh;
		*resend = 1;
		return;
	}
	*expire = req->num_timeout >= thresh &&
		  (!inet_rsk(req)->acked || req->num_timeout >= max_retries);
	/*
	 * Do not resend while waiting for data after ACK,
	 * start to resend on end of deferring period to give
	 * last chance for data or ACK to create established socket.
	 */
	*resend = !inet_rsk(req)->acked ||
		  req->num_timeout >= rskq_defer_accept - 1;
}
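
/* Retransmit the SYN-ACK for a pending request via the protocol's
 * rtx_syn_ack() hook, counting the retransmission on success.
 */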
int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req)
{
	int err = req->rsk_ops->rtx_syn_ack(parent, req);

	if (!err)
		req->num_retrans++;
	return err;
}
EXPORT_SYMBOL(inet_rtx_syn_ack);
void inet_csk_reqsk_queue_prune(struct sock *parent,
				const unsigned long interval,
				const unsigned long timeout,
				const unsigned long max_rto)
{
	struct inet_connection_sock *icsk = inet_csk(parent);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct listen_sock *lopt = queue->listen_opt;
	int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
	int thresh = max_retries;
	unsigned long now = jiffies;
	struct request_sock **reqp, *req;
	int i, budget;

	if (lopt == NULL || lopt->qlen == 0)
		return;

	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to established socket) for first timeout.
	 * If synack was not acknowledged for 1 second, it means
	 * one of the following things: synack was lost, ack was lost,
	 * rtt is high or nobody planned to ack (i.e. synflood).
	 * When server is a bit loaded, queue is populated with old
	 * open requests, reducing effective size of queue.
	 * When server is well loaded, queue size reduces to zero
	 * after several minutes of work. It is not synflood,
	 * it is normal operation. The solution is pruning
	 * too old entries overriding normal timeout, when
	 * situation becomes dangerous.
	 *
	 * Essentially, we reserve half of room for young
	 * embrions; and abort old ones without pity, if old
	 * ones are about to clog our table.
	 */
	if (lopt->qlen >> (lopt->max_qlen_log - 1)) {
		int young = (lopt->qlen_young << 1);

		while (thresh > 2) {
			if (lopt->qlen < young)
				break;
			thresh--;
			young <<= 1;
		}
	}

	if (queue->rskq_defer_accept)
		max_retries = queue->rskq_defer_accept;

	budget = 2 * (lopt->nr_table_entries / (timeout / interval));
	i = lopt->clock_hand;

	do {
		reqp = &lopt->syn_table[i];
		while ((req = *reqp) != NULL) {
			if (time_after_eq(now, req->expires)) {
				int expire = 0, resend = 0;

				syn_ack_recalc(req, thresh, max_retries,
					       queue->rskq_defer_accept,
					       &expire, &resend);
				req->rsk_ops->syn_ack_timeout(parent, req);
				if (!expire &&
				    (!resend ||
				     !inet_rtx_syn_ack(parent, req) ||
				     inet_rsk(req)->acked)) {
					unsigned long timeo;

					if (req->num_timeout++ == 0)
						lopt->qlen_young--;
					timeo = min(timeout << req->num_timeout,
						    max_rto);
					req->expires = now + timeo;
					reqp = &req->dl_next;
					continue;
				}

				/* Drop this request */
				inet_csk_reqsk_queue_unlink(parent, req, reqp);
				reqsk_queue_removed(queue, req);
				reqsk_free(req);
				continue;
			}
			reqp = &req->dl_next;
		}

		i = (i + 1) & (lopt->nr_table_entries - 1);

	} while (--budget > 0);

	lopt->clock_hand = i;

	if (lopt->qlen)
		inet_csk_reset_keepalive_timer(parent, interval);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);
/**
 *	inet_csk_clone_lock - clone an inet socket, and lock its clone
 *	@sk: the socket to clone
 *	@req: request_sock
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *inet_csk_clone_lock(const struct sock *sk,
				 const struct request_sock *req,
				 const gfp_t priority)
{
	struct sock *newsk = sk_clone_lock(sk, priority);

	if (newsk != NULL) {
		struct inet_connection_sock *newicsk = inet_csk(newsk);

		newsk->sk_state = TCP_SYN_RECV;
		newicsk->icsk_bind_hash = NULL;

		inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
		inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
		inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
		newsk->sk_write_space = sk_stream_write_space;

		newsk->sk_mark = inet_rsk(req)->ir_mark;

		newicsk->icsk_retransmits = 0;
		newicsk->icsk_backoff	  = 0;
		newicsk->icsk_probes_out  = 0;

		/* Deinitialize accept_queue to trap illegal accesses. */
		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

		security_inet_csk_clone(newsk, req);
	}
	return newsk;
}
EXPORT_SYMBOL_GPL(inet_csk_clone_lock);
/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all. Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
	WARN_ON(sk->sk_state != TCP_CLOSE);
	WARN_ON(!sock_flag(sk, SOCK_DEAD));

	/* It cannot be in hash table! */
	WARN_ON(!sk_unhashed(sk));

	/* If inet_sk(sk)->inet_num is non-zero, it must be bound */
	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	percpu_counter_dec(sk->sk_prot->orphan_count);
	sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);
/* This function allows us to force the closure of a socket after the call to
 * tcp/dccp_create_openreq_child().
 */
void inet_csk_prepare_forced_close(struct sock *sk)
	__releases(&sk->sk_lock.slock)
{
	/* sk_clone_lock locked the socket and set refcnt to 2 */
	bh_unlock_sock(sk);
	sock_put(sk);

	/* The below has to be done to allow calling inet_csk_destroy_sock */
	sock_set_flag(sk, SOCK_DEAD);
	percpu_counter_inc(sk->sk_prot->orphan_count);
	inet_sk(sk)->inet_num = 0;
}
EXPORT_SYMBOL(inet_csk_prepare_forced_close);
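
/* Move a socket into the listening state: allocate the request queue sized
 * for nr_table_entries pending requests, validate the local port via
 * get_port(), and hash the socket into the listening table.  Returns 0 on
 * success, -EADDRINUSE if the port validation fails.
 */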
int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);

	if (rc != 0)
		return rc;

	sk->sk_max_ack_backlog = 0;
	sk->sk_ack_backlog = 0;
	inet_csk_delack_init(sk);

	/* There is race window here: we announce ourselves listening,
	 * but this transition is still not validated by get_port().
	 * It is OK, because this socket enters the hash table only
	 * after validation is complete.
	 */
	sk->sk_state = TCP_LISTEN;
	if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
		inet->inet_sport = htons(inet->inet_num);

		sk_dst_reset(sk);
		sk->sk_prot->hash(sk);

		return 0;
	}

	sk->sk_state = TCP_CLOSE;
	__reqsk_queue_destroy(&icsk->icsk_accept_queue);
	return -EADDRINUSE;
}
EXPORT_SYMBOL_GPL(inet_csk_listen_start);
/*
 * This routine closes sockets which have been at least partially
 * opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *acc_req;
	struct request_sock *req;

	inet_csk_delete_keepalive_timer(sk);

	/* make all the listen_opt local to us */
	acc_req = reqsk_queue_yank_acceptq(queue);

	/* Following specs, it would be better either to send FIN
	 * (and enter FIN-WAIT-1, it is normal close)
	 * or to send active reset (abort).
	 * Certainly, it is pretty dangerous while synflood, but it is
	 * bad justification for our negligence 8)
	 * To be honest, we are not able to make either
	 * of the variants now.			--ANK
	 */
	reqsk_queue_destroy(queue);

	while ((req = acc_req) != NULL) {
		struct sock *child = req->sk;

		acc_req = req->dl_next;

		local_bh_disable();
		bh_lock_sock(child);
		WARN_ON(sock_owned_by_user(child));
		sock_hold(child);

		sk->sk_prot->disconnect(child, O_NONBLOCK);

		sock_orphan(child);

		percpu_counter_inc(sk->sk_prot->orphan_count);

		if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->listener) {
			BUG_ON(tcp_sk(child)->fastopen_rsk != req);
			BUG_ON(sk != tcp_rsk(req)->listener);

			/* Paranoid, to prevent race condition if
			 * an inbound pkt destined for child is
			 * blocked by sock lock in tcp_v4_rcv().
			 * Also to satisfy an assertion in
			 * tcp_v4_destroy_sock().
			 */
			tcp_sk(child)->fastopen_rsk = NULL;
			sock_put(sk);
		}
		inet_csk_destroy_sock(child);

		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		sk_acceptq_removed(sk);
		__reqsk_free(req);
	}
	if (queue->fastopenq != NULL) {
		/* Free all the reqs queued in rskq_rst_head. */
		spin_lock_bh(&queue->fastopenq->lock);
		acc_req = queue->fastopenq->rskq_rst_head;
		queue->fastopenq->rskq_rst_head = NULL;
		spin_unlock_bh(&queue->fastopenq->lock);
		while ((req = acc_req) != NULL) {
			acc_req = req->dl_next;
			__reqsk_free(req);
		}
	}
	WARN_ON(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
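
/* Fill in a sockaddr_in with the connected peer's address and port. */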
void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
	const struct inet_sock *inet = inet_sk(sk);

	sin->sin_family		= AF_INET;
	sin->sin_addr.s_addr	= inet->inet_daddr;
	sin->sin_port		= inet->inet_dport;
}
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);

#ifdef CONFIG_COMPAT
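/* 32-bit compat get/setsockopt: prefer the address family's dedicated
 * compat handler when one exists, otherwise fall back to the native one.
 */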
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_getsockopt != NULL)
		return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->getsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);

int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, unsigned int optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_setsockopt != NULL)
		return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->setsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif
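
/* Re-resolve the route for an established socket, e.g. after the cached dst
 * entry has been invalidated.  Honours a source-route first hop from the
 * socket's IP options and attaches the new route with sk_setup_caps().
 */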
static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;
	struct flowi4 *fl4;
	struct rtable *rt;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	fl4 = &fl->u.ip4;
	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
				   inet->inet_saddr, inet->inet_dport,
				   inet->inet_sport, sk->sk_protocol,
				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
	if (IS_ERR(rt))
		rt = NULL;
	if (rt)
		sk_setup_caps(sk, &rt->dst);
	rcu_read_unlock();

	return &rt->dst;
}
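
/* Apply a new path MTU to the socket's cached route, rebuilding the route
 * first if the cached dst is gone, and re-checking it afterwards since
 * update_pmtu() may itself invalidate the entry.
 */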
struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);
	struct inet_sock *inet = inet_sk(sk);

	if (!dst) {
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
		if (!dst)
			goto out;
	}
	dst->ops->update_pmtu(dst, sk, NULL, mtu);

	dst = __sk_dst_check(sk, 0);
	if (!dst)
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
out:
	return dst;
}
EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);