@@ -787,16 +787,16 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 	.queue_hash_add =	inet6_csk_reqsk_queue_hash_add,
 };
 
-static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
-				 u32 tsval, u32 tsecr, int oif,
-				 struct tcp_md5sig_key *key, int rst, u8 tclass,
-				 u32 label)
+static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
+				 u32 ack, u32 win, u32 tsval, u32 tsecr,
+				 int oif, struct tcp_md5sig_key *key, int rst,
+				 u8 tclass, u32 label)
 {
 	const struct tcphdr *th = tcp_hdr(skb);
 	struct tcphdr *t1;
 	struct sk_buff *buff;
 	struct flowi6 fl6;
-	struct net *net = dev_net(skb_dst(skb)->dev);
+	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
 	struct sock *ctl_sk = net->ipv6.tcp_sk;
 	unsigned int tot_len = sizeof(struct tcphdr);
 	struct dst_entry *dst;
@@ -946,7 +946,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 			  (th->doff << 2);
 
 	oif = sk ? sk->sk_bound_dev_if : 0;
-	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
+	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
 
 #ifdef CONFIG_TCP_MD5SIG
 release_sk1:
@@ -957,13 +957,13 @@ release_sk1:
 #endif
 }
 
-static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
-			    u32 win, u32 tsval, u32 tsecr, int oif,
+static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq,
+			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
 			    struct tcp_md5sig_key *key, u8 tclass,
 			    u32 label)
 {
-	tcp_v6_send_response(skb, seq, ack, win, tsval, tsecr, oif, key, 0, tclass,
-			     label);
+	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
+			     tclass, label);
 }
 
 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
@@ -971,7 +971,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 	struct inet_timewait_sock *tw = inet_twsk(sk);
 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 
-	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 			tcp_time_stamp + tcptw->tw_ts_offset,
 			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
@@ -986,10 +986,10 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
 	 */
-	tcp_v6_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
+	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
-			tcp_rsk(req)->rcv_nxt,
-			req->rcv_wnd, tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
+			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
+			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
 			0, 0);
 }