/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>

int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
int sysctl_tcp_retries1 __read_mostly = TCP_RETR1;
int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
int sysctl_tcp_orphan_retries __read_mostly;
int sysctl_tcp_thin_linear_timeouts __read_mostly;

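/* Abort the connection: report any pending soft error (or ETIMEDOUT)
 * to the user, close the socket and account the forced abort.
 */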
static void tcp_write_err(struct sock *sk)
{
	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
	sk->sk_error_report(sk);
	tcp_done(sk);
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/* Do not allow orphaned sockets to eat all our resources.
 * This is a direct violation of the TCP specs, but it is required
 * to prevent DoS attacks. It is called when a retransmission timeout
 * or zero window probe timeout occurs on an orphaned socket.
 *
 * The criteria are still not confirmed experimentally and may change.
 * We kill the socket if:
 * 1. the number of orphaned sockets exceeds an administratively
 *    configured limit, or
 * 2. we are under strong memory pressure.
 */
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int shift = 0;

	/* If peer does not open window for long time, or did not transmit
	 * anything for long time, penalize it. */
	if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
		shift++;

	/* If some dubious ICMP arrived, penalize even more. */
	if (sk->sk_err_soft)
		shift++;

	if (tcp_check_oom(sk, shift)) {
		/* Catch exceptional cases, when connection requires reset.
		 *      1. Last segment was sent recently. */
		if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
		    /*  2. Window is closed. */
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = true;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}
	return 0;
}

/* Calculate the maximal number of retries on an orphaned socket. */
static int tcp_orphan_retries(struct sock *sk, bool alive)
{
	int retries = sysctl_tcp_orphan_retries; /* May be zero. */

	/* We know from an ICMP that something is wrong. */
	if (sk->sk_err_soft && !alive)
		retries = 0;

	/* However, if socket sent something recently, select some safe
	 * number of retries. 8 corresponds to >100 seconds with minimal
	 * RTO of 200msec. */
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}

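/* Retransmission timeouts may indicate a PMTU black hole: enable MTU
 * probing on first suspicion; on later timeouts, halve the MSS derived
 * from the current search_low (capped at sysctl_tcp_base_mss, floored
 * at 68 bytes minus the TCP header length) and resync the MSS.
 */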
static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	struct net *net = sock_net(sk);

	/* Black hole detection */
	if (net->ipv4.sysctl_tcp_mtu_probing) {
		if (!icsk->icsk_mtup.enabled) {
			icsk->icsk_mtup.enabled = 1;
			icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		} else {
			struct tcp_sock *tp = tcp_sk(sk);
			int mss;

			mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
			mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
			mss = max(mss, 68 - tp->tcp_header_len);
			icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		}
	}
}

/* This function calculates a "timeout" which is equivalent to the timeout of a
 * TCP connection after "boundary" unsuccessful, exponentially backed-off
 * retransmissions with an initial RTO of TCP_RTO_MIN, or TCP_TIMEOUT_INIT if
 * the syn_set flag is set.
 */
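/* Example: with rto_base = TCP_RTO_MIN (200ms) and boundary = 8, the
 * exponential branch applies (linear_backoff_thresh = ilog2(120s/200ms)
 * = 9), giving timeout = ((2 << 8) - 1) * 200ms = 102.2s. This is the
 * ">100 seconds" cited in tcp_orphan_retries() above.
 */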
static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
				  unsigned int timeout,
				  bool syn_set)
{
	unsigned int linear_backoff_thresh, start_ts;
	unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	start_ts = tcp_sk(sk)->retrans_stamp;
	if (unlikely(!start_ts))
		start_ts = tcp_skb_timestamp(tcp_write_queue_head(sk));

	if (likely(timeout == 0)) {
		linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);

		if (boundary <= linear_backoff_thresh)
			timeout = ((2 << boundary) - 1) * rto_base;
		else
			timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
				(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
	}
	return (tcp_time_stamp - start_ts) >= timeout;
}

/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int retry_until;
	bool do_reset, syn_set = false;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits) {
			dst_negative_advice(sk);
			if (tp->syn_fastopen || tp->syn_data)
				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
			if (tp->syn_data && icsk->icsk_retransmits == 1)
				NET_INC_STATS_BH(sock_net(sk),
						 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
		}
		retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
		syn_set = true;
	} else {
		if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
			/* Some middle-boxes may black-hole Fast Open _after_
			 * the handshake. Therefore we conservatively disable
			 * Fast Open on this path on recurring timeouts with
			 * few or zero bytes acked after Fast Open.
			 */
			if (tp->syn_data_acked &&
			    tp->bytes_acked <= tp->rx_opt.mss_clamp) {
				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
				if (icsk->icsk_retransmits == sysctl_tcp_retries1)
					NET_INC_STATS_BH(sock_net(sk),
							 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
			}
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);

			dst_negative_advice(sk);
		}

		retry_until = sysctl_tcp_retries2;
		if (sock_flag(sk, SOCK_DEAD)) {
			const bool alive = icsk->icsk_rto < TCP_RTO_MAX;

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				   !retransmits_timed_out(sk, retry_until, 0, 0);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
	}

	if (retransmits_timed_out(sk, retry_until,
				  syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
	}
	return 0;
}

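/* Delayed-ACK timer work, run from the timer callback below or from
 * tcp_release_cb(): if the timer fired early, re-arm it; otherwise
 * drain the prequeue and send the overdue ACK, adjusting ATO and
 * pingpong mode to reflect the miss.
 */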
void tcp_delack_timer_handler(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_mem_reclaim_partial(sk);

	if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;

	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
		goto out;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
		struct sk_buff *skb;

		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);

		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
			sk_backlog_rcv(sk, skb);

		tp->ucopy.memory = 0;
	}

	if (inet_csk_ack_scheduled(sk)) {
		if (!icsk->icsk_ack.pingpong) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			icsk->icsk_ack.pingpong = 0;
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		tcp_send_ack(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}

out:
	if (tcp_under_memory_pressure(sk))
		sk_mem_reclaim(sk);
}

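/* Timer callback: run the handler directly unless the socket is locked
 * by the user, in which case the work is deferred to tcp_release_cb().
 */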
static void tcp_delack_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_delack_timer_handler(sk);
	} else {
		inet_csk(sk)->icsk_ack.blocked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

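/* Zero-window probe timer: while the peer advertises a zero window and
 * we still have data queued, keep probing; give up only on orphaned
 * sockets or when TCP_USER_TIMEOUT or the probe limit is exceeded.
 */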
static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;
	u32 start_ts;

	if (tp->packets_out || !tcp_send_head(sk)) {
		icsk->icsk_probes_out = 0;
		return;
	}

	/* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
	 * long as the receiver continues to respond to probes. We support this
	 * by default and reset icsk_probes_out with incoming ACKs. But if the
	 * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
	 * kill the socket when the retry count and the time exceed the
	 * corresponding system limit. We also implement a similar policy when
	 * we use RTO to probe the window in tcp_retransmit_timer().
	 */
	start_ts = tcp_skb_timestamp(tcp_send_head(sk));
	if (!start_ts)
		skb_mstamp_get(&tcp_send_head(sk)->skb_mstamp);
	else if (icsk->icsk_user_timeout &&
		 (s32)(tcp_time_stamp - start_ts) > icsk->icsk_user_timeout)
		goto abort;

	max_probes = sysctl_tcp_retries2;
	if (sock_flag(sk, SOCK_DEAD)) {
		const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;

		max_probes = tcp_orphan_retries(sk, alive);
		if (!alive && icsk->icsk_backoff >= max_probes)
			goto abort;
		if (tcp_out_of_resources(sk, true))
			return;
	}

	if (icsk->icsk_probes_out > max_probes) {
abort:		tcp_write_err(sk);
	} else {
		/* Only send another probe if we didn't close things up. */
		tcp_send_probe0(sk);
	}
}

/*
 *	Timer for Fast Open socket to retransmit SYNACK. Note that the
 *	sk here is the child socket, not the parent (listener) socket.
 */
static void tcp_fastopen_synack_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int max_retries = icsk->icsk_syn_retries ? :
	    sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
	struct request_sock *req;

	req = tcp_sk(sk)->fastopen_rsk;
	req->rsk_ops->syn_ack_timeout(req);

	if (req->num_timeout >= max_retries) {
		tcp_write_err(sk);
		return;
	}
	/* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
	 * returned from rtx_syn_ack() to make it more persistent like
	 * regular retransmit because if the child socket has been accepted
	 * it's not good to give up too easily.
	 */
	inet_rtx_syn_ack(sk, req);
	req->num_timeout++;
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
}

/*
 *	The TCP retransmit timer.
 */
void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (tp->fastopen_rsk) {
		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
			     sk->sk_state != TCP_FIN_WAIT1);
		tcp_fastopen_synack_timer(sk);
		/* Before we receive ACK to our SYN-ACK don't retransmit
		 * anything else (e.g., data or FIN segments).
		 */
		return;
	}

	if (!tp->packets_out)
		goto out;

	WARN_ON(tcp_write_queue_empty(sk));

	tp->tlp_high_seq = 0;

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
		/* Receiver dastardly shrinks window. Our retransmits
		 * become zero probes, but we should not time out this
		 * connection. If the socket is an orphan, time it out;
		 * we cannot allow such beasts to hang infinitely.
		 */
		struct inet_sock *inet = inet_sk(sk);
		if (sk->sk_family == AF_INET) {
			net_dbg_ratelimited("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &inet->inet_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			net_dbg_ratelimited("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &sk->sk_v6_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#endif
		if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk);
		tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}

	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx;

		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
			   tp->sacked_out) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		} else {
			mib_idx = LINUX_MIB_TCPTIMEOUTS;
		}
		NET_INC_STATS_BH(sock_net(sk), mib_idx);
	}

	tcp_enter_loss(sk);

	if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) {
		/* Retransmission failed because of local congestion,
		 * do not backoff.
		 */
		if (!icsk->icsk_retransmits)
			icsk->icsk_retransmits = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
					  TCP_RTO_MAX);
		goto out;
	}

	/* Increase the timeout each time we retransmit.  Note that
	 * we do not increase the rtt estimate.  rto is initialized
	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT.  I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */
	icsk->icsk_backoff++;
	icsk->icsk_retransmits++;

out_reset_timer:
	/* If the stream is thin, use linear timeouts. Since 'icsk_backoff' is
	 * used to reset the timer, set it to 0. Recalculate 'icsk_rto' as this
	 * might be increased if the stream oscillates between thin and thick,
	 * thus the old value might already be too high compared to the value
	 * set by 'tcp_set_rto' in tcp_input.c, which resets the rto without
	 * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
	 * exponential backoff behaviour, to avoid continuing to hammer
	 * linear-timeout retransmissions into a black hole.
	 */
	if (sk->sk_state == TCP_ESTABLISHED &&
	    (tp->thin_lto || sysctl_tcp_thin_linear_timeouts) &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
	} else {
		/* Use normal (exponential) backoff */
		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	}
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
	if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0, 0))
		__sk_dst_reset(sk);

out:;
}

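/* Dispatch an expired write-side timer event: retransmit, zero-window
 * probe, early retransmit or tail loss probe. If the deadline moved
 * into the future since the timer fired, just re-arm it.
 */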
void tcp_write_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
		goto out;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
		goto out;
	}

	event = icsk->icsk_pending;

	switch (event) {
	case ICSK_TIME_EARLY_RETRANS:
		tcp_resume_early_retransmit(sk);
		break;
	case ICSK_TIME_LOSS_PROBE:
		tcp_send_loss_probe(sk);
		break;
	case ICSK_TIME_RETRANS:
		icsk->icsk_pending = 0;
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		icsk->icsk_pending = 0;
		tcp_probe_timer(sk);
		break;
	}

out:
	sk_mem_reclaim(sk);
}

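/* Timer callback: as with tcp_delack_timer(), run the handler now if
 * the socket is not user-locked, else defer to tcp_release_cb().
 */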
static void tcp_write_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_write_timer_handler(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

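/* Called when a SYN-ACK (re)transmission for a request socket times
 * out; only accounts the timeout in the owning namespace.
 */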
void tcp_syn_ack_timeout(const struct request_sock *req)
{
	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);

	NET_INC_STATS_BH(net, LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

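/* Arm or disarm the keepalive timer as SO_KEEPALIVE is toggled;
 * sockets in CLOSE or LISTEN state are left untouched.
 */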
void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		inet_csk_delete_keepalive_timer(sk);
}

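/* Keepalive timer. Also handles the FIN_WAIT2 timeout for orphaned
 * sockets: either moves them to a timewait substate or resets them.
 * For keepalive proper, a probe is sent once the connection has been
 * idle for keepalive_time_when(tp), and the connection is aborted
 * after too many unanswered probes (or past TCP_USER_TIMEOUT).
 */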
static void tcp_keepalive_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer(sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		pr_err("Hmm... keepalive on a LISTEN ???\n");
		goto out;
	}

	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (tp->linger2 >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
		goto out;

	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || tcp_send_head(sk))
		goto resched;

	elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
		/* If the TCP_USER_TIMEOUT option is enabled, use that
		 * to determine when to timeout instead.
		 */
		if ((icsk->icsk_user_timeout != 0 &&
		    elapsed >= icsk->icsk_user_timeout &&
		    icsk->icsk_probes_out > 0) ||
		    (icsk->icsk_user_timeout == 0 &&
		    icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
			icsk->icsk_probes_out++;
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

	sk_mem_reclaim(sk);

resched:
	inet_csk_reset_keepalive_timer(sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

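/* Wire the timer callbacks above into the generic connection-socket
 * timer slots; called when a TCP socket is initialized.
 */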
void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
}