/* tcp_fastopen.c */

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <net/inetpeer.h>
#include <net/tcp.h>
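
/* Install a randomly generated per-netns Fast Open key, unless a
 * context has already been published.
 */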
void tcp_fastopen_init_key_once(struct net *net)
{
	u8 key[TCP_FASTOPEN_KEY_LENGTH];
	struct tcp_fastopen_context *ctxt;

	rcu_read_lock();
	ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
	if (ctxt) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	/* tcp_fastopen_reset_cipher publishes the new context
	 * atomically, so we allow this race to happen here.
	 *
	 * All call sites of tcp_fastopen_cookie_gen also check
	 * for a valid cookie, so this is an acceptable risk.
	 */
	get_random_bytes(key, sizeof(key));
	tcp_fastopen_reset_cipher(net, NULL, key, sizeof(key));
}
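
/* RCU callback: release the cipher handle, then free the context itself. */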
static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
	struct tcp_fastopen_context *ctx =
		container_of(head, struct tcp_fastopen_context, rcu);

	crypto_free_cipher(ctx->tfm);
	kfree(ctx);
}
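
/* Tear down the per-listener Fast Open context when the socket goes
 * away. Freeing is deferred through RCU so concurrent cookie
 * generation can finish safely.
 */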
void tcp_fastopen_destroy_cipher(struct sock *sk)
{
	struct tcp_fastopen_context *ctx;

	ctx = rcu_dereference_protected(
			inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1);
	if (ctx)
		call_rcu(&ctx->rcu, tcp_fastopen_ctx_free);
}
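
/* Unpublish the per-netns Fast Open context on netns teardown and
 * free it after a grace period.
 */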
void tcp_fastopen_ctx_destroy(struct net *net)
{
	struct tcp_fastopen_context *ctxt;

	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);

	ctxt = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
	rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, NULL);
	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);

	if (ctxt)
		call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
}
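
/* Install a new Fast Open key. With sk != NULL the key is per-listener
 * (TCP_FASTOPEN_KEY sockopt); otherwise it is the per-netns default.
 * The old context, if any, is freed via RCU.
 */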
int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
			      void *key, unsigned int len)
{
	struct tcp_fastopen_context *ctx, *octx;
	struct fastopen_queue *q;
	int err;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->tfm = crypto_alloc_cipher("aes", 0, 0);

	if (IS_ERR(ctx->tfm)) {
		err = PTR_ERR(ctx->tfm);
error:		kfree(ctx);
		pr_err("TCP: TFO aes cipher alloc error: %d\n", err);
		return err;
	}
	err = crypto_cipher_setkey(ctx->tfm, key, len);
	if (err) {
		pr_err("TCP: TFO cipher key error: %d\n", err);
		crypto_free_cipher(ctx->tfm);
		goto error;
	}
	memcpy(ctx->key, key, len);

	if (sk) {
		q = &inet_csk(sk)->icsk_accept_queue.fastopenq;
		spin_lock_bh(&q->lock);
		octx = rcu_dereference_protected(q->ctx,
						 lockdep_is_held(&q->lock));
		rcu_assign_pointer(q->ctx, ctx);
		spin_unlock_bh(&q->lock);
	} else {
		spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);
		octx = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
		rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, ctx);
		spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);
	}

	if (octx)
		call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
	return err;
}
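
/* Encrypt one 16-byte path block (the connection's addresses) with the
 * active Fast Open key to produce the cookie. The per-listener context
 * is preferred, falling back to the per-netns one.
 */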
static bool __tcp_fastopen_cookie_gen(struct sock *sk, const void *path,
				      struct tcp_fastopen_cookie *foc)
{
	struct tcp_fastopen_context *ctx;
	bool ok = false;

	rcu_read_lock();

	ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
	if (!ctx)
		ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);

	if (ctx) {
		crypto_cipher_encrypt_one(ctx->tfm, foc->val, path);
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
		ok = true;
	}
	rcu_read_unlock();
	return ok;
}
/* Generate the fastopen cookie by doing aes128 encryption on both
 * the source and destination addresses. Pad 0s for IPv4 or IPv4-mapped-IPv6
 * addresses. For the longer IPv6 addresses use CBC-MAC.
 *
 * XXX (TFO) - refactor when TCP_FASTOPEN_COOKIE_SIZE != AES_BLOCK_SIZE.
 */
static bool tcp_fastopen_cookie_gen(struct sock *sk,
				    struct request_sock *req,
				    struct sk_buff *syn,
				    struct tcp_fastopen_cookie *foc)
{
	if (req->rsk_ops->family == AF_INET) {
		const struct iphdr *iph = ip_hdr(syn);

		__be32 path[4] = { iph->saddr, iph->daddr, 0, 0 };
		return __tcp_fastopen_cookie_gen(sk, path, foc);
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (req->rsk_ops->family == AF_INET6) {
		const struct ipv6hdr *ip6h = ipv6_hdr(syn);
		struct tcp_fastopen_cookie tmp;

		if (__tcp_fastopen_cookie_gen(sk, &ip6h->saddr, &tmp)) {
			struct in6_addr *buf = &tmp.addr;
			int i;

			for (i = 0; i < 4; i++)
				buf->s6_addr32[i] ^= ip6h->daddr.s6_addr32[i];
			return __tcp_fastopen_cookie_gen(sk, buf, foc);
		}
	}
#endif
	return false;
}
/* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
 * queue this additional data / FIN.
 */
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
		return;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return;

	skb_dst_drop(skb);
	/* segs_in has been initialized to 1 in tcp_create_openreq_child().
	 * Hence, reset segs_in to 0 before calling tcp_segs_in()
	 * to avoid double counting. Also, tcp_segs_in() expects
	 * skb->len to include the tcp_hdrlen. Hence, it should
	 * be called before __skb_pull().
	 */
	tp->segs_in = 0;
	tcp_segs_in(tp, skb);
	__skb_pull(skb, tcp_hdrlen(skb));
	sk_forced_mem_schedule(sk, skb->truesize);
	skb_set_owner_r(skb, sk);

	TCP_SKB_CB(skb)->seq++;
	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;

	tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	tp->syn_data_acked = 1;

	/* u64_stats_update_begin(&tp->syncp) not needed here,
	 * as we certainly are not changing upper 32bit value (0)
	 */
	tp->bytes_received = skb->len;

	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
		tcp_fin(sk);
}
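
/* Create a full child socket straight from the SYN, before the 3WHS
 * completes, so the payload carried in the SYN can be queued for the
 * application immediately.
 */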
static struct sock *tcp_fastopen_create_child(struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req)
{
	struct tcp_sock *tp;
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	struct sock *child;
	bool own_req;

	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 NULL, &own_req);
	if (!child)
		return NULL;

	spin_lock(&queue->fastopenq.lock);
	queue->fastopenq.qlen++;
	spin_unlock(&queue->fastopenq.lock);

	/* Initialize the child socket. Have to fix some values to take
	 * into account the child is a Fast Open socket and is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	tp->fastopen_rsk = req;
	tcp_rsk(req)->tfo_listener = true;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
	tp->max_window = tp->snd_wnd;

	/* Activate the retrans timer so that SYNACK can be retransmitted.
	 * The request socket is not added to the ehash
	 * because it's been added to the accept queue directly.
	 */
	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

	refcount_set(&req->rsk_refcnt, 2);

	/* Now finish processing the fastopen child socket. */
	tcp_init_transfer(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB);

	tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;

	tcp_fastopen_add_skb(child, skb);

	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
	tp->rcv_wup = tp->rcv_nxt;
	/* tcp_conn_request() is sending the SYNACK,
	 * and queues the child into listener accept queue.
	 */
	return child;
}
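
/* Decide whether the listener can take another pending TFO request,
 * reaping one expired RST-ed request if the queue is full.
 */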
static bool tcp_fastopen_queue_check(struct sock *sk)
{
	struct fastopen_queue *fastopenq;

	/* Make sure the listener has enabled fastopen, and we don't
	 * exceed the max # of pending TFO requests allowed before trying
	 * to validate the cookie, in order to avoid burning CPU cycles
	 * unnecessarily.
	 *
	 * XXX (TFO) - The implication of checking the max_qlen before
	 * processing a cookie request is that clients can't differentiate
	 * between qlen overflow causing Fast Open to be disabled
	 * temporarily vs a server not supporting Fast Open at all.
	 */
	fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
	if (fastopenq->max_qlen == 0)
		return false;

	if (fastopenq->qlen >= fastopenq->max_qlen) {
		struct request_sock *req1;

		spin_lock(&fastopenq->lock);
		req1 = fastopenq->rskq_rst_head;
		if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
			spin_unlock(&fastopenq->lock);
			return false;
		}
		fastopenq->rskq_rst_head = req1->dl_next;
		fastopenq->qlen--;
		spin_unlock(&fastopenq->lock);
		reqsk_put(req1);
	}
	return true;
}
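
/* The cookie requirement can be waived by the sysctl flag, the
 * TCP_FASTOPEN_NO_COOKIE socket option, or a per-route metric.
 */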
static bool tcp_fastopen_no_cookie(const struct sock *sk,
				   const struct dst_entry *dst,
				   int flag)
{
	return (sock_net(sk)->ipv4.sysctl_tcp_fastopen & flag) ||
	       tcp_sk(sk)->fastopen_no_cookie ||
	       (dst && dst_metric(dst, RTAX_FASTOPEN_NO_COOKIE));
}
/* Returns the child socket if we should perform Fast Open on the SYN,
 * NULL otherwise. The cookie (foc) may be updated and returned to the
 * client in the SYN-ACK later, e.g. for a Fast Open cookie request
 * (foc->len == 0).
 */
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      const struct dst_entry *dst)
{
	bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
	int tcp_fastopen = sock_net(sk)->ipv4.sysctl_tcp_fastopen;
	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
	struct sock *child;

	if (foc->len == 0) /* Client requests a cookie */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);

	if (!((tcp_fastopen & TFO_SERVER_ENABLE) &&
	      (syn_data || foc->len >= 0) &&
	      tcp_fastopen_queue_check(sk))) {
		foc->len = -1;
		return NULL;
	}

	if (syn_data &&
	    tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
		goto fastopen;

	if (foc->len >= 0 &&  /* Client presents or requests a cookie */
	    tcp_fastopen_cookie_gen(sk, req, skb, &valid_foc) &&
	    foc->len == TCP_FASTOPEN_COOKIE_SIZE &&
	    foc->len == valid_foc.len &&
	    !memcmp(foc->val, valid_foc.val, foc->len)) {
		/* Cookie is valid. Create a (full) child socket to accept
		 * the data in SYN before returning a SYN-ACK to ack the
		 * data. If we fail to create the socket, fall back and
		 * ack the ISN only but include the same cookie.
		 *
		 * Note: Data-less SYN with valid cookie is allowed to send
		 * data in SYN_RECV state.
		 */
fastopen:
		child = tcp_fastopen_create_child(sk, skb, req);
		if (child) {
			foc->len = -1;
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPFASTOPENPASSIVE);
			return child;
		}
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
	} else if (foc->len > 0) /* Client presents an invalid cookie */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);

	valid_foc.exp = foc->exp;
	*foc = valid_foc;
	return NULL;
}
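
/* Client side: fetch the cached cookie and MSS and decide whether
 * data may be sent in the SYN of this connection attempt.
 */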
bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
			       struct tcp_fastopen_cookie *cookie)
{
	unsigned long last_syn_loss = 0;
	const struct dst_entry *dst;
	int syn_loss = 0;

	tcp_fastopen_cache_get(sk, mss, cookie, &syn_loss, &last_syn_loss);

	/* Recurring FO SYN losses: no cookie or data in SYN */
	if (syn_loss > 1 &&
	    time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
		cookie->len = -1;
		return false;
	}

	/* Firewall blackhole issue check */
	if (tcp_fastopen_active_should_disable(sk)) {
		cookie->len = -1;
		return false;
	}

	dst = __sk_dst_get(sk);

	if (tcp_fastopen_no_cookie(sk, dst, TFO_CLIENT_NO_COOKIE)) {
		cookie->len = -1;
		return true;
	}
	return cookie->len > 0;
}
/* This function checks if we want to defer sending SYN until the first
 * write(). We defer under the following conditions:
 * 1. fastopen_connect sockopt is set
 * 2. we have a valid cookie
 * Return value: return true if we want to defer until application writes data
 *               return false if we want to send out SYN immediately
 */
bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
{
	struct tcp_fastopen_cookie cookie = { .len = 0 };
	struct tcp_sock *tp = tcp_sk(sk);
	u16 mss;

	if (tp->fastopen_connect && !tp->fastopen_req) {
		if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
			inet_sk(sk)->defer_connect = 1;
			return true;
		}

		/* Alloc fastopen_req in order for FO option to be included
		 * in SYN
		 */
		tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req),
					   sk->sk_allocation);
		if (tp->fastopen_req)
			tp->fastopen_req->cookie = cookie;
		else
			*err = -ENOBUFS;
	}
	return false;
}
EXPORT_SYMBOL(tcp_fastopen_defer_connect);
/*
 * The following code block is to deal with middlebox issues with TFO:
 * Middlebox firewall issues can potentially cause the server's data to
 * be blackholed after a successful 3WHS using TFO.
 * The proposed solution is to disable active TFO globally under the
 * following circumstances:
 * 1. client side TFO socket receives out of order FIN
 * 2. client side TFO socket receives out of order RST
 * We disable active side TFO globally for 1hr at first. Then if it
 * happens again, we disable it for 2h, then 4h, 8h, ...
 * And we reset the timeout back to 1hr when we see a successful active
 * TFO connection with data exchanges.
 */

/* Disable active TFO and record current jiffies and
 * tfo_active_disable_times
 */
void tcp_fastopen_active_disable(struct sock *sk)
{
	struct net *net = sock_net(sk);

	atomic_inc(&net->ipv4.tfo_active_disable_times);
	net->ipv4.tfo_active_disable_stamp = jiffies;
	NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE);
}
/* Calculate timeout for tfo active disable
 * Return true if we are still in the active TFO disable period
 * Return false if timeout already expired and we should use active TFO
 */
bool tcp_fastopen_active_should_disable(struct sock *sk)
{
	unsigned int tfo_bh_timeout = sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout;
	int tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
	unsigned long timeout;
	int multiplier;

	if (!tfo_da_times)
		return false;

	/* Limit timeout to max: 2^6 * initial timeout */
	multiplier = 1 << min(tfo_da_times - 1, 6);
	timeout = multiplier * tfo_bh_timeout * HZ;
	if (time_before(jiffies, sock_net(sk)->ipv4.tfo_active_disable_stamp + timeout))
		return true;

	/* Mark check bit so we can check for successful active TFO
	 * condition and reset tfo_active_disable_times
	 */
	tcp_sk(sk)->syn_fastopen_ch = 1;
	return false;
}
/* Disable active TFO if FIN is the only packet in the ofo queue
 * and no data is received.
 * Also check if we can reset tfo_active_disable_times if data is
 * received successfully on a marked active TFO socket opened on
 * a non-loopback interface.
 */
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst;
	struct sk_buff *skb;

	if (!tp->syn_fastopen)
		return;

	if (!tp->data_segs_in) {
		skb = skb_rb_first(&tp->out_of_order_queue);
		if (skb && !skb_rb_next(skb)) {
			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
				tcp_fastopen_active_disable(sk);
				return;
			}
		}
	} else if (tp->syn_fastopen_ch &&
		   atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times)) {
		dst = sk_dst_get(sk);
		if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK)))
			atomic_set(&sock_net(sk)->ipv4.tfo_active_disable_times, 0);
		dst_release(dst);
	}
}