@@ -403,18 +403,17 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
 				     struct flowi4 *fl4,
 				     const struct request_sock *req)
 {
-	struct rtable *rt;
 	const struct inet_request_sock *ireq = inet_rsk(req);
-	struct ip_options_rcu *opt = inet_rsk(req)->opt;
-	struct net *net = sock_net(sk);
-	int flags = inet_sk_flowi_flags(sk);
+	struct net *net = read_pnet(&ireq->ireq_net);
+	struct ip_options_rcu *opt = ireq->opt;
+	struct rtable *rt;
 
-	flowi4_init_output(fl4, sk->sk_bound_dev_if, ireq->ir_mark,
+	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
-			   sk->sk_protocol,
-			   flags,
+			   sk->sk_protocol, inet_sk_flowi_flags(sk),
 			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
-			   ireq->ir_loc_addr, ireq->ir_rmt_port, inet_sk(sk)->inet_sport);
+			   ireq->ir_loc_addr, ireq->ir_rmt_port,
+			   htons(ireq->ir_num));
 	security_req_classify_flow(req, flowi4_to_flowi(fl4));
 	rt = ip_route_output_flow(net, fl4, sk);
 	if (IS_ERR(rt))
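inet_csk_route_req() now builds its flow key entirely from the request socket: the netns comes from ireq->ireq_net, the device index from ir_iif, and the source port from ir_num, so the listener's fields are no longer read here. One byte-order subtlety explains the new htons(): inet_sport is stored big-endian (__be16), while ir_num keeps the local port in host order. A minimal userspace sketch of just that conversion (port value hypothetical):

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t ir_num = 80;		/* host order, as in ireq->ir_num */
		uint16_t sport = htons(ir_num);	/* network order, as inet_sport is stored */

		printf("host %u -> network 0x%04x\n", ir_num, sport);
		return 0;
	}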
@@ -436,9 +435,9 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
 					    const struct request_sock *req)
 {
 	const struct inet_request_sock *ireq = inet_rsk(req);
+	struct net *net = read_pnet(&ireq->ireq_net);
 	struct inet_sock *newinet = inet_sk(newsk);
 	struct ip_options_rcu *opt;
-	struct net *net = sock_net(sk);
 	struct flowi4 *fl4;
 	struct rtable *rt;
 
@@ -446,11 +445,12 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
 
 	rcu_read_lock();
 	opt = rcu_dereference(newinet->inet_opt);
-	flowi4_init_output(fl4, sk->sk_bound_dev_if, inet_rsk(req)->ir_mark,
+	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
 			   sk->sk_protocol, inet_sk_flowi_flags(sk),
 			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
-			   ireq->ir_loc_addr, ireq->ir_rmt_port, inet_sk(sk)->inet_sport);
+			   ireq->ir_loc_addr, ireq->ir_rmt_port,
+			   htons(ireq->ir_num));
 	security_req_classify_flow(req, flowi4_to_flowi(fl4));
 	rt = ip_route_output_flow(net, fl4, sk);
 	if (IS_ERR(rt))
@@ -495,7 +495,7 @@ struct request_sock *inet_csk_search_req(struct sock *sk,
 	u32 hash = inet_synq_hash(raddr, rport, lopt->hash_rnd,
 				  lopt->nr_table_entries);
 
-	write_lock(&icsk->icsk_accept_queue.syn_wait_lock);
+	spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
 	for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) {
 		const struct inet_request_sock *ireq = inet_rsk(req);
 
@@ -508,7 +508,7 @@ struct request_sock *inet_csk_search_req(struct sock *sk,
 			break;
 		}
 	}
-	write_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
+	spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
 
 	return req;
 }
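Both write_lock()/write_unlock() pairs on syn_wait_lock become plain spin_lock()/spin_unlock(). That only type-checks if the field itself changes from rwlock_t to spinlock_t elsewhere in the patch; with per-request timers there is no longer a long read-side walk of the whole SYN table from a central timer, so a plain spinlock around the short insert/lookup/remove sections suffices. A sketch of the implied declaration (other members omitted; the rwlock_t original is inferred from the old write_lock() calls, not quoted from the header):

	#include <linux/spinlock.h>

	struct request_sock_queue {
		spinlock_t	syn_wait_lock;	/* was: rwlock_t syn_wait_lock; */
	};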
@@ -571,8 +571,9 @@ static void reqsk_timer_handler(unsigned long data)
 	struct inet_connection_sock *icsk = inet_csk(sk_listener);
 	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
 	struct listen_sock *lopt = queue->listen_opt;
-	int expire = 0, resend = 0;
+	int qlen, expire = 0, resend = 0;
 	int max_retries, thresh;
+	u8 defer_accept;
 
 	if (sk_listener->sk_state != TCP_LISTEN || !lopt) {
 		reqsk_put(req);
@@ -598,21 +599,23 @@ static void reqsk_timer_handler(unsigned long data)
 	 * embrions; and abort old ones without pity, if old
 	 * ones are about to clog our table.
 	 */
-	if (listen_sock_qlen(lopt) >> (lopt->max_qlen_log - 1)) {
+	qlen = listen_sock_qlen(lopt);
+	if (qlen >> (lopt->max_qlen_log - 1)) {
 		int young = listen_sock_young(lopt) << 1;
 
 		while (thresh > 2) {
-			if (listen_sock_qlen(lopt) < young)
+			if (qlen < young)
 				break;
 			thresh--;
 			young <<= 1;
 		}
 	}
-	if (queue->rskq_defer_accept)
-		max_retries = queue->rskq_defer_accept;
-	syn_ack_recalc(req, thresh, max_retries, queue->rskq_defer_accept,
+	defer_accept = READ_ONCE(queue->rskq_defer_accept);
+	if (defer_accept)
+		max_retries = defer_accept;
+	syn_ack_recalc(req, thresh, max_retries, defer_accept,
 		       &expire, &resend);
-	req->rsk_ops->syn_ack_timeout(sk_listener, req);
+	req->rsk_ops->syn_ack_timeout(req);
 	if (!expire &&
 	    (!resend ||
 	     !inet_rtx_syn_ack(sk_listener, req) ||
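Two things happen in this hunk. First, listen_sock_qlen() is read once into qlen and rskq_defer_accept is snapshotted with READ_ONCE(): the timer now runs without the listener lock, so re-reading either value mid-computation could give the heuristic an inconsistent view (the snapshot also feeds both the branch and the syn_ack_recalc() call). The syn_ack_timeout() callback likewise loses its listener argument, since the request now carries enough state on its own. Second, the overload heuristic is easier to see with concrete numbers: qlen >> (max_qlen_log - 1) is non-zero exactly when the SYN queue is at least half full, and the loop then lowers the retry threshold while qlen keeps doubling past the count of young (not-yet-retransmitted) requests. A standalone sketch with hypothetical numbers:

	#include <stdio.h>

	int main(void)
	{
		int max_qlen_log = 10;	/* queue capacity 1 << 10 = 1024 */
		int qlen = 600;		/* hypothetical SYN queue length */
		int young = 50 << 1;	/* hypothetical "young" count, doubled as in the hunk */
		int thresh = 5;		/* hypothetical retry threshold */

		if (qlen >> (max_qlen_log - 1)) {	/* fires once qlen >= 512 */
			while (thresh > 2) {
				if (qlen < young)
					break;
				thresh--;	/* trade retries away under pressure */
				young <<= 1;
			}
		}
		printf("effective thresh = %d\n", thresh);	/* 2 with these numbers */
		return 0;
	}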
@@ -647,10 +650,10 @@ void reqsk_queue_hash_req(struct request_sock_queue *queue,
 	setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
 	req->rsk_hash = hash;
 
-	write_lock(&queue->syn_wait_lock);
+	spin_lock(&queue->syn_wait_lock);
 	req->dl_next = lopt->syn_table[hash];
 	lopt->syn_table[hash] = req;
-	write_unlock(&queue->syn_wait_lock);
+	spin_unlock(&queue->syn_wait_lock);
 
 	mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
 }
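With the hash insert bracketed by the new spinlock, each request arms its own expiry timer: setup_timer() records reqsk_timer_handler plus the req pointer as the callback's data argument, and mod_timer_pinned() keeps the timer on the CPU that queued the request, so expirations no longer depend on a central per-listener timer. A compressed sketch of the pattern (arm_req_timer is a hypothetical name; timer API as it existed at the time of this patch):

	#include <linux/jiffies.h>
	#include <linux/timer.h>
	#include <net/request_sock.h>

	static void reqsk_timer_handler(unsigned long data);	/* defined earlier in this file */

	/* hypothetical helper mirroring the two calls in the hunk above */
	static void arm_req_timer(struct request_sock *req, unsigned long timeout)
	{
		setup_timer(&req->rsk_timer, reqsk_timer_handler,
			    (unsigned long)req);	/* handler recovers req from 'data' */
		mod_timer_pinned(&req->rsk_timer, jiffies + timeout);	/* stays on this CPU */
	}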