@@ -822,7 +822,8 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
                              struct flowi *fl,
                              struct request_sock *req,
                              u16 queue_mapping,
-                             struct tcp_fastopen_cookie *foc)
+                             struct tcp_fastopen_cookie *foc,
+                             bool attach_req)
 {
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct flowi4 fl4;
@@ -833,7 +834,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
        if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
                return -1;

-       skb = tcp_make_synack(sk, dst, req, foc);
+       skb = tcp_make_synack(sk, dst, req, foc, attach_req);

        if (skb) {
                __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
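
The new attach_req flag tells tcp_make_synack() which socket should own the SYN-ACK skb. A minimal sketch of how the flag is presumably consumed inside tcp_make_synack() in tcp_output.c (that file is outside this excerpt, so treat this as an assumption):

        if (attach_req) {
                /* charge the skb to the request socket, which stays valid
                 * even while the listener is processed locklessly
                 */
                skb_set_owner_w(skb, req_to_sk(req));
        } else {
                /* legacy behaviour: account the skb against the listener;
                 * the cast drops the const added to the sk parameter
                 */
                skb_set_owner_w(skb, (struct sock *)sk);
        }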
@@ -1112,10 +1113,13 @@ clear_hash_noput:
 }
 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

+#endif
+
 /* Called with rcu_read_lock() */
-static bool tcp_v4_inbound_md5_hash(struct sock *sk,
+static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
                                    const struct sk_buff *skb)
 {
+#ifdef CONFIG_TCP_MD5SIG
        /*
         * This gets called for each TCP segment that arrives
         * so we want to be efficient.
@@ -1165,8 +1169,9 @@ static bool tcp_v4_inbound_md5_hash(struct sock *sk,
                return true;
        }
        return false;
-}
 #endif
+       return false;
+}

 static void tcp_v4_init_req(struct request_sock *req,
                            const struct sock *sk_listener,
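
Moving the #ifdef CONFIG_TCP_MD5SIG guard inside tcp_v4_inbound_md5_hash() (note the matching +#endif placed after tcp_v4_md5_hash_skb() in the previous hunk) lets callers invoke it unconditionally: with MD5 compiled out, the function becomes a stub that always accepts. Pieced together from the two hunks, the function now has this shape:

        static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
                                            const struct sk_buff *skb)
        {
        #ifdef CONFIG_TCP_MD5SIG
                /* compare the expected peer key against the segment's MD5
                 * option; any mismatch returns true, i.e. drop the packet
                 */
        #endif
                return false;   /* MD5 disabled, or the hash checked out */
        }

This is what allows the new call site in the TCP_NEW_SYN_RECV path below, and the existing one in tcp_v4_rcv(), to lose their #ifdef wrappers.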
@@ -1220,7 +1225,6 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
        .route_req      =       tcp_v4_route_req,
        .init_seq       =       tcp_v4_init_sequence,
        .send_synack    =       tcp_v4_send_synack,
-       .queue_hash_add =       inet_csk_reqsk_queue_hash_add,
 };

 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
@@ -1339,34 +1343,11 @@ put_and_exit:
 }
 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

-static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
+static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
 {
+#ifdef CONFIG_SYN_COOKIES
        const struct tcphdr *th = tcp_hdr(skb);
-       const struct iphdr *iph = ip_hdr(skb);
-       struct request_sock *req;
-       struct sock *nsk;
-
-       req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
-       if (req) {
-               nsk = tcp_check_req(sk, skb, req, false);
-               if (!nsk || nsk == sk)
-                       reqsk_put(req);
-               return nsk;
-       }
-
-       nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
-                       th->source, iph->daddr, th->dest, inet_iif(skb));
-
-       if (nsk) {
-               if (nsk->sk_state != TCP_TIME_WAIT) {
-                       bh_lock_sock(nsk);
-                       return nsk;
-               }
-               inet_twsk_put(inet_twsk(nsk));
-               return NULL;
-       }
-
-#ifdef CONFIG_SYN_COOKIES
        if (!th->syn)
                sk = cookie_v4_check(sk, skb);
 #endif
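
Request-socket and established-socket lookups no longer belong here: request sockets are installed in the established hash, so the main receive path finds them directly. What is left of tcp_v4_hnd_req() is the SYN-cookie check, hence the rename. Reconstructed from this hunk (the trailing return is inferred from the context lines of the next hunk):

        static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
        {
        #ifdef CONFIG_SYN_COOKIES
                const struct tcphdr *th = tcp_hdr(skb);

                if (!th->syn)
                        sk = cookie_v4_check(sk, skb);
        #endif
                return sk;
        }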
@@ -1374,7 +1355,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
 }

 /* The socket must have it's spinlock held when we get
- * here.
+ * here, unless it is a TCP_LISTEN socket.
  *
  * We have a potential double-lock case here, so even when
  * doing backlog processing we use the BH locking scheme.
@@ -1405,13 +1386,13 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
                goto csum_err;

        if (sk->sk_state == TCP_LISTEN) {
-               struct sock *nsk = tcp_v4_hnd_req(sk, skb);
+               struct sock *nsk = tcp_v4_cookie_check(sk, skb);
+
                if (!nsk)
                        goto discard;
-
                if (nsk != sk) {
                        sock_rps_save_rxhash(nsk, skb);
-                       sk_mark_napi_id(sk, skb);
+                       sk_mark_napi_id(nsk, skb);
                        if (tcp_child_process(sk, nsk, skb)) {
                                rsk = nsk;
                                goto reset;
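
Besides switching to tcp_v4_cookie_check(), this hunk fixes a subtle bug: the NAPI id should be recorded on the child socket that will carry the flow, not on the listener, so sk_mark_napi_id(sk, skb) becomes sk_mark_napi_id(nsk, skb). Annotated:

        if (nsk != sk) {
                sock_rps_save_rxhash(nsk, skb); /* flow-steering state on the child */
                sk_mark_napi_id(nsk, skb);      /* was sk (the listener): wrong socket */
                /* tcp_child_process() handles the rest, as in the hunk above */
        }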
@@ -1599,6 +1580,29 @@ process:
        if (sk->sk_state == TCP_TIME_WAIT)
                goto do_time_wait;

+       if (sk->sk_state == TCP_NEW_SYN_RECV) {
+               struct request_sock *req = inet_reqsk(sk);
+               struct sock *nsk = NULL;
+
+               sk = req->rsk_listener;
+               if (tcp_v4_inbound_md5_hash(sk, skb))
+                       goto discard_and_relse;
+               if (sk->sk_state == TCP_LISTEN)
+                       nsk = tcp_check_req(sk, skb, req, false);
+               if (!nsk) {
+                       reqsk_put(req);
+                       goto discard_it;
+               }
+               if (nsk == sk) {
+                       sock_hold(sk);
+                       reqsk_put(req);
+               } else if (tcp_child_process(sk, nsk, skb)) {
+                       tcp_v4_send_reset(nsk, skb);
+                       goto discard_it;
+               } else {
+                       return 0;
+               }
+       }
        if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
                NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
                goto discard_and_relse;
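
This is the receive-side half of moving request sockets into the established hash: __inet_lookup_skb() can now return a socket in TCP_NEW_SYN_RECV state, and the segment is then processed on behalf of req->rsk_listener. The insertion side is not in this excerpt; a rough sketch of what it presumably does when the SYN is accepted (names are assumptions from the same series):

        req->rsk_listener = sk;                 /* remember the listener */
        sock_hold(sk);                          /* the reqsk pins the listener */
        inet_ehash_nolisten(req_to_sk(req), NULL);      /* visible to lookups */

Note the reference choreography in the branch above: when tcp_check_req() hands back the listener itself, the packet falls through to the normal path, so the code takes a fresh hold on sk before dropping the request-socket reference with reqsk_put().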
@@ -1607,25 +1611,23 @@ process:
        if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_and_relse;

-#ifdef CONFIG_TCP_MD5SIG
-       /*
-        * We really want to reject the packet as early as possible
-        * if:
-        *  o We're expecting an MD5'd packet and this is no MD5 tcp option
-        *  o There is an MD5 option and we're not expecting one
-        */
        if (tcp_v4_inbound_md5_hash(sk, skb))
                goto discard_and_relse;
-#endif

        nf_reset(skb);

        if (sk_filter(sk, skb))
                goto discard_and_relse;

-       sk_incoming_cpu_update(sk);
        skb->dev = NULL;

+       if (sk->sk_state == TCP_LISTEN) {
+               ret = tcp_v4_do_rcv(sk, skb);
+               goto put_and_return;
+       }
+
+       sk_incoming_cpu_update(sk);
+
        bh_lock_sock_nested(sk);
        tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
        ret = 0;
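
Two changes land here. First, the early-demux MD5 block loses its #ifdef and comment; the check itself stays and is valid in every build thanks to the stub discussed above. Second, listeners gain a lock-free fast path: a TCP_LISTEN socket is fed straight to tcp_v4_do_rcv() without bh_lock_sock_nested() or backlog queueing, which is exactly what the updated locking comment earlier in the patch announces. The new lines, annotated:

        if (sk->sk_state == TCP_LISTEN) {
                ret = tcp_v4_do_rcv(sk, skb);   /* runs without the socket lock */
                goto put_and_return;            /* skip the lock/backlog path */
        }

        sk_incoming_cpu_update(sk);             /* now done for non-listeners only */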
@@ -1640,6 +1642,7 @@ process:
        }
        bh_unlock_sock(sk);

+put_and_return:
        sock_put(sk);

        return ret;
@@ -1834,35 +1837,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
        ++st->num;
        ++st->offset;

-       if (st->state == TCP_SEQ_STATE_OPENREQ) {
-               struct request_sock *req = cur;
-
-               icsk = inet_csk(st->syn_wait_sk);
-               req = req->dl_next;
-               while (1) {
-                       while (req) {
-                               if (req->rsk_ops->family == st->family) {
-                                       cur = req;
-                                       goto out;
-                               }
-                               req = req->dl_next;
-                       }
-                       if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
-                               break;
-get_req:
-                       req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
-               }
-               sk        = sk_nulls_next(st->syn_wait_sk);
-               st->state = TCP_SEQ_STATE_LISTENING;
-               spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
-       } else {
-               icsk = inet_csk(sk);
-               spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
-               if (reqsk_queue_len(&icsk->icsk_accept_queue))
-                       goto start_req;
-               spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
-               sk = sk_nulls_next(sk);
-       }
+       sk = sk_nulls_next(sk);
 get_sk:
        sk_nulls_for_each_from(sk, node) {
                if (!net_eq(sock_net(sk), net))
@@ -1872,16 +1847,6 @@ get_sk:
                        goto out;
                }
                icsk = inet_csk(sk);
-               spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
-               if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
-start_req:
-                       st->uid         = sock_i_uid(sk);
-                       st->syn_wait_sk = sk;
-                       st->state       = TCP_SEQ_STATE_OPENREQ;
-                       st->sbucket     = 0;
-                       goto get_req;
-               }
-               spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
        }
        spin_unlock_bh(&ilb->lock);
        st->offset = 0;
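
With the per-listener SYN table (listen_opt->syn_table) and its syn_wait_lock gone, the /proc/net/tcp iterator no longer needs the TCP_SEQ_STATE_OPENREQ detour and simply steps from listener to listener. The core of listening_get_next() after these two hunks, reconstructed (the body of the family test is inferred from the surviving context lines):

        ++st->num;
        ++st->offset;

        sk = sk_nulls_next(sk);
        get_sk:
        sk_nulls_for_each_from(sk, node) {
                if (!net_eq(sock_net(sk), net))
                        continue;
                if (sk->sk_family == st->family) {
                        cur = sk;       /* reconstructed; report this listener */
                        goto out;
                }
                icsk = inet_csk(sk);    /* assignment survives but is unused here */
        }

Pending connection requests are reported from the established-hash walk instead, via the TCP_NEW_SYN_RECV case added to tcp4_seq_show() at the end of this patch.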
@@ -2013,7 +1978,6 @@ static void *tcp_seek_last_pos(struct seq_file *seq)
        void *rc = NULL;

        switch (st->state) {
-       case TCP_SEQ_STATE_OPENREQ:
        case TCP_SEQ_STATE_LISTENING:
                if (st->bucket >= INET_LHTABLE_SIZE)
                        break;
@@ -2072,7 +2036,6 @@ static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        }

        switch (st->state) {
-       case TCP_SEQ_STATE_OPENREQ:
        case TCP_SEQ_STATE_LISTENING:
                rc = listening_get_next(seq, v);
                if (!rc) {
@@ -2097,11 +2060,6 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
        struct tcp_iter_state *st = seq->private;

        switch (st->state) {
-       case TCP_SEQ_STATE_OPENREQ:
-               if (v) {
-                       struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
-                       spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
-               }
        case TCP_SEQ_STATE_LISTENING:
                if (v != SEQ_START_TOKEN)
                        spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
@@ -2155,7 +2113,7 @@ void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
 EXPORT_SYMBOL(tcp_proc_unregister);

 static void get_openreq4(const struct request_sock *req,
-                        struct seq_file *f, int i, kuid_t uid)
+                        struct seq_file *f, int i)
 {
        const struct inet_request_sock *ireq = inet_rsk(req);
        long delta = req->rsk_timer.expires - jiffies;
@@ -2172,7 +2130,8 @@ static void get_openreq4(const struct request_sock *req,
                1,    /* timers active (only the expire timer) */
                jiffies_delta_to_clock_t(delta),
                req->num_timeout,
-               from_kuid_munged(seq_user_ns(f), uid),
+               from_kuid_munged(seq_user_ns(f),
+                                sock_i_uid(req->rsk_listener)),
                0,  /* non standard timer */
                0, /* open_requests have no inode */
                0,
@@ -2273,18 +2232,12 @@ static int tcp4_seq_show(struct seq_file *seq, void *v)
        }
        st = seq->private;

-       switch (st->state) {
-       case TCP_SEQ_STATE_LISTENING:
-       case TCP_SEQ_STATE_ESTABLISHED:
-               if (sk->sk_state == TCP_TIME_WAIT)
-                       get_timewait4_sock(v, seq, st->num);
-               else
-                       get_tcp4_sock(v, seq, st->num);
-               break;
-       case TCP_SEQ_STATE_OPENREQ:
-               get_openreq4(v, seq, st->num, st->uid);
-               break;
-       }
+       if (sk->sk_state == TCP_TIME_WAIT)
+               get_timewait4_sock(v, seq, st->num);
+       else if (sk->sk_state == TCP_NEW_SYN_RECV)
+               get_openreq4(v, seq, st->num);
+       else
+               get_tcp4_sock(v, seq, st->num);
 out:
        seq_pad(seq, '\n');
        return 0;