tcp_timer.c

/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>

int sysctl_tcp_thin_linear_timeouts __read_mostly;
/**
 *  tcp_write_err() - close socket and save error info
 *  @sk:  The socket the error has appeared on.
 *
 *  Returns: Nothing (void)
 */
static void tcp_write_err(struct sock *sk)
{
	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
	sk->sk_error_report(sk);

	tcp_done(sk);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}
/**
 *  tcp_out_of_resources() - Close socket if out of resources
 *  @sk:        pointer to current socket
 *  @do_reset:  send a last packet with reset flag
 *
 *  Do not allow orphaned sockets to eat all our resources.
 *  This is a direct violation of the TCP specs, but it is required
 *  to prevent DoS attacks. It is called when a retransmission timeout
 *  or zero probe timeout occurs on an orphaned socket.
 *
 *  The criteria are still not confirmed experimentally and may change.
 *  We kill the socket if:
 *  1. The number of orphaned sockets exceeds an administratively configured
 *     limit.
 *  2. We are under strong memory pressure.
 */
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int shift = 0;

	/* If peer does not open window for long time, or did not transmit
	 * anything for long time, penalize it. */
	if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
		shift++;

	/* If some dubious ICMP arrived, penalize even more. */
	if (sk->sk_err_soft)
		shift++;

	if (tcp_check_oom(sk, shift)) {
		/* Catch exceptional cases, when connection requires reset.
		 *      1. Last segment was sent recently. */
		if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
		    /*  2. Window is closed. */
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = true;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}
	return 0;
}
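
/* Hedged note on the "shift" penalties above: inside tcp_check_oom() the
 * usual orphan test is roughly
 *
 *	orphans << shift > sysctl_tcp_max_orphans
 *
 * so each penalty halves the effective orphan budget for this socket.
 * With, say, tcp_max_orphans = 65536 (a hypothetical value; the default
 * scales with system memory) and both penalties applied (shift = 2), the
 * socket is killed once ~16384 orphans exist or TCP memory pressure is
 * reported.
 */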
/**
 *  tcp_orphan_retries() - Returns maximal number of retries on an orphaned socket
 *  @sk:    Pointer to the current socket.
 *  @alive: bool, socket alive state
 */
static int tcp_orphan_retries(struct sock *sk, bool alive)
{
	int retries = sock_net(sk)->ipv4.sysctl_tcp_orphan_retries; /* May be zero. */

	/* We know from an ICMP that something is wrong. */
	if (sk->sk_err_soft && !alive)
		retries = 0;

	/* However, if socket sent something recently, select some safe
	 * number of retries. 8 corresponds to >100 seconds with minimal
	 * RTO of 200msec. */
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}
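
/* Worked example for the ">100 seconds" claim above (hedged; assumes
 * TCP_RTO_MIN = 200ms and pure exponential doubling as implemented in
 * retransmits_timed_out() below): 8 retries, counting the final wait,
 * give up after roughly
 *
 *	0.2s * (2^9 - 1) = 102.2s
 *
 * which is just over the 100-second mark.
 */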
static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	struct net *net = sock_net(sk);

	/* Black hole detection */
	if (net->ipv4.sysctl_tcp_mtu_probing) {
		if (!icsk->icsk_mtup.enabled) {
			icsk->icsk_mtup.enabled = 1;
			icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		} else {
			struct tcp_sock *tp = tcp_sk(sk);
			int mss;

			mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
			mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
			mss = max(mss, 68 - tp->tcp_header_len);
			icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		}
	}
}
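
/* Worked example (hedged; assumes the stock default
 * net.ipv4.tcp_base_mss = 1024 of recent kernels): if the current
 * search_low corresponds to an MSS of 1400, persistent timeouts halve it
 * to 700, below the 1024 cap, so the probe floor drops to MTU(700). The
 * max(mss, 68 - tp->tcp_header_len) clamp only bites for pathologically
 * small values, keeping the result consistent with the 68-byte minimum
 * IPv4 MTU.
 */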
/**
 *  retransmits_timed_out() - returns true if this connection has timed out
 *  @sk:       The current socket
 *  @boundary: max number of retransmissions
 *  @timeout:  A custom timeout value. If set to 0, the default timeout is
 *	       calculated from TCP_RTO_MIN and the number of unsuccessful
 *	       retransmits.
 *
 *  The default "timeout" value this function can calculate and use
 *  is equivalent to the timeout of a TCP Connection
 *  after "boundary" unsuccessful, exponentially backed-off
 *  retransmissions with an initial RTO of TCP_RTO_MIN.
 */
static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
				  unsigned int timeout)
{
	const unsigned int rto_base = TCP_RTO_MIN;
	unsigned int linear_backoff_thresh, start_ts;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	start_ts = tcp_sk(sk)->retrans_stamp;
	if (unlikely(!start_ts))
		start_ts = tcp_skb_timestamp(tcp_write_queue_head(sk));

	if (likely(timeout == 0)) {
		linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);

		if (boundary <= linear_backoff_thresh)
			timeout = ((2 << boundary) - 1) * rto_base;
		else
			timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
				(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
	}
	return (tcp_time_stamp(tcp_sk(sk)) - start_ts) >= jiffies_to_msecs(timeout);
}
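
/* Worked example (hedged; assumes TCP_RTO_MIN = 200ms, TCP_RTO_MAX = 120s):
 *
 *	linear_backoff_thresh = ilog2(120000 / 200) = ilog2(600) = 9
 *
 * For boundary <= 9 the timeout is (2^(boundary+1) - 1) * 200ms, e.g.
 * boundary = 5 gives 63 * 0.2s = 12.6s. Past the threshold the RTO is
 * clamped at TCP_RTO_MAX, so each extra retry adds a flat 120s:
 * boundary = 15 gives (2^10 - 1) * 0.2s + 6 * 120s = 924.6s (~15.4 min),
 * the familiar figure for the default tcp_retries2.
 */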
/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	bool expired, do_reset;
	int retry_until;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits) {
			dst_negative_advice(sk);
			if (tp->syn_fastopen || tp->syn_data)
				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
			if (tp->syn_data && icsk->icsk_retransmits == 1)
				NET_INC_STATS(sock_net(sk),
					      LINUX_MIB_TCPFASTOPENACTIVEFAIL);
		} else if (!tp->syn_data && !tp->syn_fastopen) {
			sk_rethink_txhash(sk);
		}
		retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
		expired = icsk->icsk_retransmits >= retry_until;
	} else {
		if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0)) {
			/* Some middle-boxes may black-hole Fast Open _after_
			 * the handshake. Therefore we conservatively disable
			 * Fast Open on this path on recurring timeouts after
			 * successful Fast Open.
			 */
			if (tp->syn_data_acked) {
				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
				if (icsk->icsk_retransmits == net->ipv4.sysctl_tcp_retries1)
					NET_INC_STATS(sock_net(sk),
						      LINUX_MIB_TCPFASTOPENACTIVEFAIL);
			}
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);

			dst_negative_advice(sk);
		} else {
			sk_rethink_txhash(sk);
		}

		retry_until = net->ipv4.sysctl_tcp_retries2;
		if (sock_flag(sk, SOCK_DEAD)) {
			const bool alive = icsk->icsk_rto < TCP_RTO_MAX;

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				!retransmits_timed_out(sk, retry_until, 0);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
		expired = retransmits_timed_out(sk, retry_until,
						icsk->icsk_user_timeout);
	}
	if (expired) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
	}
	return 0;
}
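
/* Hedged note on the knobs above, using the stock IPv4 defaults
 * (tcp_syn_retries = 6, tcp_retries1 = 3, tcp_retries2 = 15): an
 * unanswered connect() gives up after 6 SYN retransmissions (~127s of
 * exponential backoff from a 1s initial RTO); an established connection
 * re-checks its route and MTU after ~3 retries and aborts after ~15
 * (~15-16 minutes), unless TCP_USER_TIMEOUT or the orphan limits cut it
 * shorter.
 */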
/* Called with BH disabled */
void tcp_delack_timer_handler(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_mem_reclaim_partial(sk);

	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;

	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
		goto out;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
		struct sk_buff *skb;

		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);

		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
			sk_backlog_rcv(sk, skb);

		tp->ucopy.memory = 0;
	}

	if (inet_csk_ack_scheduled(sk)) {
		if (!icsk->icsk_ack.pingpong) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			icsk->icsk_ack.pingpong = 0;
			icsk->icsk_ack.ato      = TCP_ATO_MIN;
		}
		tcp_send_ack(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}

out:
	if (tcp_under_memory_pressure(sk))
		sk_mem_reclaim(sk);
}
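
/* Hedged sketch of the ATO adjustment above (assuming TCP_ATO_MIN = 40ms
 * at HZ = 1000): in bulk-receive mode a missed delayed ACK doubles the
 * ACK timeout, 40ms -> 80ms -> 160ms -> ..., capped at the current RTO;
 * in interactive "pingpong" mode a miss instead drops the connection back
 * to quick-ACK behaviour with ato reset to the 40ms minimum.
 */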
/**
 *  tcp_delack_timer() - The TCP delayed ACK timeout handler
 *  @data:  Pointer to the current socket. (gets cast to struct sock *)
 *
 *  This function gets (indirectly) called when the kernel timer for a TCP packet
 *  of this socket expires. Calls tcp_delack_timer_handler() to do the actual work.
 *
 *  Returns: Nothing (void)
 */
static void tcp_delack_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_delack_timer_handler(sk);
	} else {
		inet_csk(sk)->icsk_ack.blocked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}
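
/* Note on the deferral pattern: when the socket is locked by a user
 * context, the timer cannot run the handler directly, so it sets a
 * TCP_DELACK_TIMER_DEFERRED bit and takes an extra reference via
 * sock_hold(); tcp_release_cb() later runs the deferred work when the
 * user releases the socket lock, and drops that reference. The same
 * pattern is used by tcp_write_timer() below.
 */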
static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;
	u32 start_ts;

	if (tp->packets_out || !tcp_send_head(sk)) {
		icsk->icsk_probes_out = 0;
		return;
	}

	/* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
	 * long as the receiver continues to respond to probes. We support this
	 * by default and reset icsk_probes_out with incoming ACKs. But if the
	 * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
	 * kill the socket when the retry count and the time exceed the
	 * corresponding system limit. We also implement a similar policy when
	 * we use RTO to probe the window in tcp_retransmit_timer().
	 */
	start_ts = tcp_skb_timestamp(tcp_send_head(sk));
	if (!start_ts)
		tcp_send_head(sk)->skb_mstamp = tp->tcp_mstamp;
	else if (icsk->icsk_user_timeout &&
		 (s32)(tcp_time_stamp(tp) - start_ts) >
		 jiffies_to_msecs(icsk->icsk_user_timeout))
		goto abort;

	max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
	if (sock_flag(sk, SOCK_DEAD)) {
		const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;

		max_probes = tcp_orphan_retries(sk, alive);
		if (!alive && icsk->icsk_backoff >= max_probes)
			goto abort;
		if (tcp_out_of_resources(sk, true))
			return;
	}

	if (icsk->icsk_probes_out > max_probes) {
abort:		tcp_write_err(sk);
	} else {
		/* Only send another probe if we didn't close things up. */
		tcp_send_probe0(sk);
	}
}
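
/* Hedged example of the zero-window probe lifecycle: the peer advertises
 * a zero window, so tcp_send_probe0() transmits window probes on an
 * exponentially backed-off timer. Each window update from the peer resets
 * icsk_probes_out; if the peer stays silent, a live socket gives up after
 * tcp_retries2 (default 15) probes, while an orphan is cut off by the much
 * tighter tcp_orphan_retries() budget.
 */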
/*
 *	Timer for Fast Open socket to retransmit SYNACK. Note that the
 *	sk here is the child socket, not the parent (listener) socket.
 */
static void tcp_fastopen_synack_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int max_retries = icsk->icsk_syn_retries ? :
	    sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
	struct request_sock *req;

	req = tcp_sk(sk)->fastopen_rsk;
	req->rsk_ops->syn_ack_timeout(req);

	if (req->num_timeout >= max_retries) {
		tcp_write_err(sk);
		return;
	}
	/* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
	 * returned from rtx_syn_ack() to make it more persistent like
	 * regular retransmit because if the child socket has been accepted
	 * it's not good to give up too easily.
	 */
	inet_rtx_syn_ack(sk, req);
	req->num_timeout++;
	icsk->icsk_retransmits++;
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
}
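
/* Worked example (hedged; TCP_TIMEOUT_INIT = 1s, default
 * tcp_synack_retries = 5): the Fast Open child retransmits its SYN-ACK
 * after 2s, 4s, 8s, ... (TCP_TIMEOUT_INIT << num_timeout, capped at
 * TCP_RTO_MAX) and is allowed 5 + 1 = 6 attempts before tcp_write_err()
 * aborts it: one extra attempt compared to a plain listener, since the
 * child may already be accepted and carrying data.
 */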
/**
 *  tcp_retransmit_timer() - The TCP retransmit timeout handler
 *  @sk:  Pointer to the current socket.
 *
 *  This function gets called when the kernel timer for a TCP packet
 *  of this socket expires.
 *
 *  It handles retransmission, timer adjustment and other necessary measures.
 *
 *  Returns: Nothing (void)
 */
void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (tp->fastopen_rsk) {
		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
			     sk->sk_state != TCP_FIN_WAIT1);
		tcp_fastopen_synack_timer(sk);
		/* Before we receive ACK to our SYN-ACK don't retransmit
		 * anything else (e.g., data or FIN segments).
		 */
		return;
	}
	if (!tp->packets_out)
		goto out;

	WARN_ON(tcp_write_queue_empty(sk));

	tp->tlp_high_seq = 0;

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
		/* Receiver dastardly shrinks window. Our retransmits
		 * become zero probes, but we should not timeout this
		 * connection. If the socket is an orphan, time it out,
		 * we cannot allow such beasts to hang infinitely.
		 */
		struct inet_sock *inet = inet_sk(sk);
		if (sk->sk_family == AF_INET) {
			net_dbg_ratelimited("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &inet->inet_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			net_dbg_ratelimited("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &sk->sk_v6_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#endif
		if (tcp_jiffies32 - tp->rcv_tstamp > TCP_RTO_MAX) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk);
		tcp_retransmit_skb(sk, tcp_write_queue_head(sk), 1);
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}

	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx;

		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
			   tp->sacked_out) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		} else {
			mib_idx = LINUX_MIB_TCPTIMEOUTS;
		}
		__NET_INC_STATS(sock_net(sk), mib_idx);
	}

	tcp_enter_loss(sk);

	if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk), 1) > 0) {
		/* Retransmission failed because of local congestion,
		 * do not backoff.
		 */
		if (!icsk->icsk_retransmits)
			icsk->icsk_retransmits = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
					  TCP_RTO_MAX);
		goto out;
	}

	/* Increase the timeout each time we retransmit.  Note that
	 * we do not increase the rtt estimate.  rto is initialized
	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT.  I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */
	icsk->icsk_backoff++;
	icsk->icsk_retransmits++;

out_reset_timer:
	/* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
	 * used to reset timer, set to 0. Recalculate 'icsk_rto' as this
	 * might be increased if the stream oscillates between thin and thick,
	 * thus the old value might already be too high compared to the value
	 * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
	 * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
	 * exponential backoff behaviour, to avoid continuing to hammer
	 * linear-timeout retransmissions into a black hole.
	 */
	if (sk->sk_state == TCP_ESTABLISHED &&
	    (tp->thin_lto || sysctl_tcp_thin_linear_timeouts) &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
	} else {
		/* Use normal (exponential) backoff */
		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	}
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
	if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0))
		__sk_dst_reset(sk);

out:;
}
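
/* Hedged example of the thin-stream branch: tcp_stream_is_thin() treats a
 * connection with fewer than 4 packets in flight (and past initial slow
 * start) as "thin" - think interactive traffic such as SSH or game-state
 * updates. For the first TCP_THIN_LINEAR_RETRIES (6) retransmissions such
 * a flow retries at a flat, freshly computed RTO instead of doubling it,
 * trading extra retransmissions for lower worst-case latency; after 6
 * losses in a row it falls back to normal exponential backoff.
 */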
/* Called with bottom-half processing disabled.
   Called by tcp_write_timer() */
void tcp_write_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    !icsk->icsk_pending)
		goto out;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
		goto out;
	}

	tcp_mstamp_refresh(tcp_sk(sk));
	event = icsk->icsk_pending;

	switch (event) {
	case ICSK_TIME_REO_TIMEOUT:
		tcp_rack_reo_timeout(sk);
		break;
	case ICSK_TIME_LOSS_PROBE:
		tcp_send_loss_probe(sk);
		break;
	case ICSK_TIME_RETRANS:
		icsk->icsk_pending = 0;
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		icsk->icsk_pending = 0;
		tcp_probe_timer(sk);
		break;
	}

out:
	sk_mem_reclaim(sk);
}
static void tcp_write_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_write_timer_handler(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}
void tcp_syn_ack_timeout(const struct request_sock *req)
{
	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);

	__NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		inet_csk_delete_keepalive_timer(sk);
}
EXPORT_SYMBOL_GPL(tcp_set_keepalive);
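
/* Hedged user-space sketch of how this path is reached: SO_KEEPALIVE sets
 * SOCK_KEEPOPEN and arms the timer via tcp_set_keepalive(); the per-socket
 * TCP_KEEP* options feed keepalive_time_when()/keepalive_intvl_when()/
 * keepalive_probes() used by tcp_keepalive_timer() below.
 *
 *	int on = 1, idle = 60, intvl = 10, cnt = 5;
 *	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
 *
 * With these (illustrative) values an idle peer is probed after 60s, then
 * every 10s, and the connection is reset after 5 unanswered probes
 * (~110s total).
 */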
static void tcp_keepalive_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer(sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		pr_err("Hmm... keepalive on a LISTEN ???\n");
		goto out;
	}

	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (tp->linger2 >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
		goto out;

	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || tcp_send_head(sk))
		goto resched;

	elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
		/* If the TCP_USER_TIMEOUT option is enabled, use that
		 * to determine when to timeout instead.
		 */
		if ((icsk->icsk_user_timeout != 0 &&
		    elapsed >= icsk->icsk_user_timeout &&
		    icsk->icsk_probes_out > 0) ||
		    (icsk->icsk_user_timeout == 0 &&
		    icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
			icsk->icsk_probes_out++;
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

	sk_mem_reclaim(sk);

resched:
	inet_csk_reset_keepalive_timer(sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
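
/* Worked example (hedged; stock defaults tcp_keepalive_time = 7200s,
 * tcp_keepalive_intvl = 75s, tcp_keepalive_probes = 9): a connection that
 * has been completely idle for 2 hours gets its first keepalive probe,
 * then one every 75s; after 9 unanswered probes (~11 minutes more)
 * tcp_write_err() resets it. Any traffic on the socket restarts the
 * 2-hour clock.
 */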
void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
	hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_PINNED);
	tcp_sk(sk)->pacing_timer.function = tcp_pace_kick;
}