@@ -470,13 +470,14 @@ out:
 	sock_put(sk);
 }
 
 static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
-			      struct flowi6 *fl6,
+			      struct flowi *fl,
 			      struct request_sock *req,
 			      u16 queue_mapping,
 			      struct tcp_fastopen_cookie *foc)
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
 	struct ipv6_pinfo *np = inet6_sk(sk);
+	struct flowi6 *fl6 = &fl->u.ip6;
 	struct sk_buff *skb;
 	int err = -ENOMEM;
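The hunk above changes tcp_v6_send_synack() to take the family-agnostic struct flowi and pick out its IPv6 member itself, so one send_synack prototype can serve both tcp_v4_send_synack() and tcp_v6_send_synack(). For reference, struct flowi wraps the per-family flow keys in a union, roughly as below (an abridged sketch of include/net/flow.h from this era; only the members relevant here are shown):

/* Abridged sketch; the `fl->u.ip6` access above resolves into this union. */
struct flowi {
	union {
		struct flowi_common	__fl_common;
		struct flowi4		ip4;	/* IPv4 flow keys */
		struct flowi6		ip6;	/* IPv6 flow keys */
	} u;
};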
@@ -497,24 +498,14 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
 		skb_set_queue_mapping(skb, queue_mapping);
 		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
 		err = net_xmit_eval(err);
+		if (!tcp_rsk(req)->snt_synack && !err)
+			tcp_rsk(req)->snt_synack = tcp_time_stamp;
 	}
 
 done:
 	return err;
 }
 
-static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req)
-{
-	struct flowi6 fl6;
-	int res;
-
-	res = tcp_v6_send_synack(sk, NULL, &fl6, req, 0, NULL);
-	if (!res) {
-		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
-	}
-	return res;
-}
 
 static void tcp_v6_reqsk_destructor(struct request_sock *req)
 {
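With this hunk the IPv6-private retransmit wrapper disappears; .rtx_syn_ack is instead pointed at the shared tcp_rtx_synack() (see the ops-table hunk below). That helper is not part of this excerpt; a minimal sketch of it, assuming it lives in net/ipv4/tcp_output.c and simply re-dispatches through tcp_request_sock_ops the way the deleted tcp_v6_rtx_synack() did:

/* Sketch only: body assumed to mirror the deleted tcp_v6_rtx_synack(). */
int tcp_rtx_synack(struct sock *sk, struct request_sock *req)
{
	const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
	struct flowi fl;
	int res;

	/* Re-dispatch through the per-family ops instead of calling
	 * tcp_v4_send_synack()/tcp_v6_send_synack() directly. */
	res = af_ops->send_synack(sk, NULL, &fl, req, 0, NULL);
	if (!res) {
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
	}
	return res;
}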
@@ -718,22 +709,66 @@ static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 }
 #endif
 
+static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
+			    struct sk_buff *skb)
+{
+	struct inet_request_sock *ireq = inet_rsk(req);
+	struct ipv6_pinfo *np = inet6_sk(sk);
+
+	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
+
+	ireq->ir_iif = sk->sk_bound_dev_if;
+
+	/* So that link locals have meaning */
+	if (!sk->sk_bound_dev_if &&
+	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
+		ireq->ir_iif = inet6_iif(skb);
+
+	if (!TCP_SKB_CB(skb)->when &&
+	    (ipv6_opt_accepted(sk, skb) || np->rxopt.bits.rxinfo ||
+	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
+	     np->rxopt.bits.rxohlim || np->repflow)) {
+		atomic_inc(&skb->users);
+		ireq->pktopts = skb;
+	}
+}
+
+static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
+					  const struct request_sock *req,
+					  bool *strict)
+{
+	if (strict)
+		*strict = true;
+	return inet6_csk_route_req(sk, &fl->u.ip6, req);
+}
+
 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
 	.family		=	AF_INET6,
 	.obj_size	=	sizeof(struct tcp6_request_sock),
-	.rtx_syn_ack	=	tcp_v6_rtx_synack,
+	.rtx_syn_ack	=	tcp_rtx_synack,
 	.send_ack	=	tcp_v6_reqsk_send_ack,
 	.destructor	=	tcp_v6_reqsk_destructor,
 	.send_reset	=	tcp_v6_send_reset,
 	.syn_ack_timeout =	tcp_syn_ack_timeout,
 };
 
-#ifdef CONFIG_TCP_MD5SIG
 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
+	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
+				sizeof(struct ipv6hdr),
+#ifdef CONFIG_TCP_MD5SIG
 	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
-};
 #endif
+	.init_req	=	tcp_v6_init_req,
+#ifdef CONFIG_SYN_COOKIES
+	.cookie_init_seq =	cookie_v6_init_sequence,
+#endif
+	.route_req	=	tcp_v6_route_req,
+	.init_seq	=	tcp_v6_init_sequence,
+	.send_synack	=	tcp_v6_send_synack,
+	.queue_hash_add =	inet6_csk_reqsk_queue_hash_add,
+};
 
 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
 				 u32 tsval, u32 tsecr, int oif,
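The initializers above only compile against hooks that this change adds to struct tcp_request_sock_ops; the include/net/tcp.h hunk is not part of this excerpt. A sketch of the resulting structure, with the prototypes inferred from tcp_v6_init_req(), tcp_v6_route_req() and tcp_v6_send_synack() above and the MD5 members abridged:

/* Sketch; prototypes inferred from the IPv6 implementations above. */
struct tcp_request_sock_ops {
	u16 mss_clamp;		/* replaces the per-family tmp_opt.mss_clamp */
#ifdef CONFIG_TCP_MD5SIG
	/* .md5_lookup / .calc_md5_hash, as before, elided here */
#endif
	void (*init_req)(struct request_sock *req, struct sock *sk,
			 struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES
	__u32 (*cookie_init_seq)(struct sock *sk, const struct sk_buff *skb,
				 __u16 *mss);
#endif
	struct dst_entry *(*route_req)(struct sock *sk, struct flowi *fl,
				       const struct request_sock *req,
				       bool *strict);
	__u32 (*init_seq)(const struct sk_buff *skb);
	int (*send_synack)(struct sock *sk, struct dst_entry *dst,
			   struct flowi *fl, struct request_sock *req,
			   u16 queue_mapping, struct tcp_fastopen_cookie *foc);
	void (*queue_hash_add)(struct sock *sk, struct request_sock *req,
			       const unsigned long timeout);
};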
@@ -973,152 +1008,17 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
 	return sk;
 }
 
-/* FIXME: this is substantially similar to the ipv4 code.
- * Can some kind of merge be done? -- erics
- */
 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 {
-	struct tcp_options_received tmp_opt;
-	struct request_sock *req;
-	struct inet_request_sock *ireq;
-	struct ipv6_pinfo *np = inet6_sk(sk);
-	struct tcp_sock *tp = tcp_sk(sk);
-	__u32 isn = TCP_SKB_CB(skb)->when;
-	struct dst_entry *dst = NULL;
-	struct tcp_fastopen_cookie foc = { .len = -1 };
-	bool want_cookie = false, fastopen;
-	struct flowi6 fl6;
-	int err;
-
 	if (skb->protocol == htons(ETH_P_IP))
 		return tcp_v4_conn_request(sk, skb);
 
 	if (!ipv6_unicast_destination(skb))
 		goto drop;
 
-	if ((sysctl_tcp_syncookies == 2 ||
-	     inet_csk_reqsk_queue_is_full(sk)) && !isn) {
-		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
-		if (!want_cookie)
-			goto drop;
-	}
-
-	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
-		goto drop;
-	}
-
-	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
-	if (req == NULL)
-		goto drop;
-
-#ifdef CONFIG_TCP_MD5SIG
-	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
-#endif
-
-	tcp_clear_options(&tmp_opt);
-	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
-	tmp_opt.user_mss = tp->rx_opt.user_mss;
-	tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
-
-	if (want_cookie && !tmp_opt.saw_tstamp)
-		tcp_clear_options(&tmp_opt);
-
-	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
-	tcp_openreq_init(req, &tmp_opt, skb, sk);
-
-	ireq = inet_rsk(req);
-	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
-	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
-	if (!want_cookie || tmp_opt.tstamp_ok)
-		TCP_ECN_create_request(req, skb, sock_net(sk));
-
-	ireq->ir_iif = sk->sk_bound_dev_if;
-
-	/* So that link locals have meaning */
-	if (!sk->sk_bound_dev_if &&
-	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
-		ireq->ir_iif = inet6_iif(skb);
-
-	if (!isn) {
-		if (ipv6_opt_accepted(sk, skb) ||
-		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
-		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim ||
-		    np->repflow) {
-			atomic_inc(&skb->users);
-			ireq->pktopts = skb;
-		}
-
-		if (want_cookie) {
-			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
-			req->cookie_ts = tmp_opt.tstamp_ok;
-			goto have_isn;
-		}
+	return tcp_conn_request(&tcp6_request_sock_ops,
+				&tcp_request_sock_ipv6_ops, sk, skb);
 
-		/* VJ's idea. We save last timestamp seen
-		 * from the destination in peer table, when entering
-		 * state TIME-WAIT, and check against it before
-		 * accepting new connection request.
-		 *
-		 * If "isn" is not zero, this request hit alive
-		 * timewait bucket, so that all the necessary checks
-		 * are made in the function processing timewait state.
-		 */
-		if (tmp_opt.saw_tstamp &&
-		    tcp_death_row.sysctl_tw_recycle &&
-		    (dst = inet6_csk_route_req(sk, &fl6, req)) != NULL) {
-			if (!tcp_peer_is_proven(req, dst, true)) {
-				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
-				goto drop_and_release;
-			}
-		}
-		/* Kill the following clause, if you dislike this way. */
-		else if (!sysctl_tcp_syncookies &&
-			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
-			  (sysctl_max_syn_backlog >> 2)) &&
-			 !tcp_peer_is_proven(req, dst, false)) {
-			/* Without syncookies last quarter of
-			 * backlog is filled with destinations,
-			 * proven to be alive.
-			 * It means that we continue to communicate
-			 * to destinations, already remembered
-			 * to the moment of synflood.
-			 */
-			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
-				       &ireq->ir_v6_rmt_addr, ntohs(tcp_hdr(skb)->source));
-			goto drop_and_release;
-		}
-
-		isn = tcp_v6_init_sequence(skb);
-	}
-have_isn:
-
-	if (security_inet_conn_request(sk, skb, req))
-		goto drop_and_release;
-
-	if (!dst && (dst = inet6_csk_route_req(sk, &fl6, req)) == NULL)
-		goto drop_and_free;
-
-	tcp_rsk(req)->snt_isn = isn;
-	tcp_rsk(req)->snt_synack = tcp_time_stamp;
-	tcp_openreq_init_rwin(req, sk, dst);
-	fastopen = !want_cookie &&
-		   tcp_try_fastopen(sk, skb, req, &foc, dst);
-	err = tcp_v6_send_synack(sk, dst, &fl6, req,
-				 skb_get_queue_mapping(skb), &foc);
-	if (!fastopen) {
-		if (err || want_cookie)
-			goto drop_and_free;
-
-		tcp_rsk(req)->listener = NULL;
-		inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
-	}
-	return 0;
-
-drop_and_release:
-	dst_release(dst);
-drop_and_free:
-	reqsk_free(req);
 drop:
 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 	return 0; /* don't send reset */
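tcp_conn_request() itself is outside this excerpt (it lands in net/ipv4/tcp_input.c). A condensed sketch of its shape, reconstructed from the deleted tcp_v6_conn_request() body above and therefore approximate rather than verbatim:

/* Condensed sketch only: control flow reconstructed from the deleted
 * per-family code above, with the SYN-flood and queue-overflow checks
 * elided; they match the deleted code line for line. */
int tcp_conn_request(struct request_sock_ops *rsk_ops,
		     const struct tcp_request_sock_ops *af_ops,
		     struct sock *sk, struct sk_buff *skb)
{
	struct tcp_options_received tmp_opt;
	struct tcp_fastopen_cookie foc = { .len = -1 };
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
	struct request_sock *req;
	bool want_cookie = false;
	bool fastopen;
	struct flowi fl;
	int err;

	req = inet_reqsk_alloc(rsk_ops);	/* generic allocator (assumed) */
	if (!req)
		goto drop;

	tcp_rsk(req)->af_specific = af_ops;

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = af_ops->mss_clamp;	/* was a per-family constant */
	tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb, sk);

	af_ops->init_req(req, sk, skb);		/* addresses, iif, pktopts */

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	if (!isn) {
		/* tw_recycle/proven-peer heuristics go here, routing via
		 * af_ops->route_req(sk, &fl, req, &strict); under SYN flood
		 * the ISN comes from af_ops->cookie_init_seq() instead. */
		isn = af_ops->init_seq(skb);
	}
	if (!dst) {
		dst = af_ops->route_req(sk, &fl, req, NULL);
		if (!dst)
			goto drop_and_free;
	}

	tcp_rsk(req)->snt_isn = isn;
	tcp_openreq_init_rwin(req, sk, dst);
	fastopen = !want_cookie &&
		   tcp_try_fastopen(sk, skb, req, &foc, dst);
	err = af_ops->send_synack(sk, dst, &fl, req,
				  skb_get_queue_mapping(skb), &foc);
	if (!fastopen) {
		if (err || want_cookie)
			goto drop_and_free;

		tcp_rsk(req)->listener = NULL;
		af_ops->queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	}
	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0;	/* don't send reset */
}

The point of the refactor is visible in the sketch: every address-family decision (MSS clamp, ISN generation, routing, SYN-ACK emission, hash insertion) sits behind af_ops, so tcp_v4_conn_request() and tcp_v6_conn_request() can share one state machine.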