@@ -121,8 +121,7 @@ int sysctl_tcp_abc __read_mostly;
/* Adapt the MSS value used to make delayed ack decision to the
 * real world.
 */
-static void tcp_measure_rcv_mss(struct sock *sk,
- const struct sk_buff *skb)
+static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
{
struct inet_connection_sock *icsk = inet_csk(sk);
const unsigned int lss = icsk->icsk_ack.last_seg_size;
@@ -133,7 +132,7 @@ static void tcp_measure_rcv_mss(struct sock *sk,
/* skb->len may jitter because of SACKs, even if peer
 * sends good full-sized frames.
 */
- len = skb_shinfo(skb)->gso_size ?: skb->len;
+ len = skb_shinfo(skb)->gso_size ? : skb->len;
if (len >= icsk->icsk_ack.rcv_mss) {
icsk->icsk_ack.rcv_mss = len;
} else {
@@ -173,8 +172,8 @@ static void tcp_incr_quickack(struct sock *sk)
struct inet_connection_sock *icsk = inet_csk(sk);
unsigned quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);

- if (quickacks==0)
- quickacks=2;
+ if (quickacks == 0)
+ quickacks = 2;
if (quickacks > icsk->icsk_ack.quick)
icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
}
@@ -199,7 +198,7 @@ static inline int tcp_in_quickack_mode(const struct sock *sk)

static inline void TCP_ECN_queue_cwr(struct tcp_sock *tp)
{
- if (tp->ecn_flags&TCP_ECN_OK)
+ if (tp->ecn_flags & TCP_ECN_OK)
tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
}

@@ -216,7 +215,7 @@ static inline void TCP_ECN_withdraw_cwr(struct tcp_sock *tp)

static inline void TCP_ECN_check_ce(struct tcp_sock *tp, struct sk_buff *skb)
{
- if (tp->ecn_flags&TCP_ECN_OK) {
+ if (tp->ecn_flags & TCP_ECN_OK) {
if (INET_ECN_is_ce(TCP_SKB_CB(skb)->flags))
tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
/* Funny extension: if ECT is not set on a segment,
@@ -229,19 +228,19 @@ static inline void TCP_ECN_check_ce(struct tcp_sock *tp, struct sk_buff *skb)

static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, struct tcphdr *th)
{
- if ((tp->ecn_flags&TCP_ECN_OK) && (!th->ece || th->cwr))
+ if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr))
tp->ecn_flags &= ~TCP_ECN_OK;
}

static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, struct tcphdr *th)
{
- if ((tp->ecn_flags&TCP_ECN_OK) && (!th->ece || !th->cwr))
+ if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
tp->ecn_flags &= ~TCP_ECN_OK;
}

static inline int TCP_ECN_rcv_ecn_echo(struct tcp_sock *tp, struct tcphdr *th)
{
- if (th->ece && !th->syn && (tp->ecn_flags&TCP_ECN_OK))
+ if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
return 1;
return 0;
}
@@ -303,8 +302,7 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
return 0;
}

-static void tcp_grow_window(struct sock *sk,
- struct sk_buff *skb)
+static void tcp_grow_window(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);

@@ -318,12 +316,13 @@ static void tcp_grow_window(struct sock *sk,
 * will fit to rcvbuf in future.
 */
if (tcp_win_from_space(skb->truesize) <= skb->len)
- incr = 2*tp->advmss;
+ incr = 2 * tp->advmss;
else
incr = __tcp_grow_window(sk, skb);

if (incr) {
- tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp);
+ tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
+ tp->window_clamp);
inet_csk(sk)->icsk_ack.quick |= 1;
}
}
@@ -398,10 +397,9 @@ static void tcp_clamp_window(struct sock *sk)
sysctl_tcp_rmem[2]);
}
if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
- tp->rcv_ssthresh = min(tp->window_clamp, 2U*tp->advmss);
+ tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
}

-
/* Initialize RCV_MSS value.
 * RCV_MSS is an our guess about MSS used by the peer.
 * We haven't any direct information about the MSS.
@@ -414,7 +412,7 @@ void tcp_initialize_rcv_mss(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);

- hint = min(hint, tp->rcv_wnd/2);
+ hint = min(hint, tp->rcv_wnd / 2);
hint = min(hint, TCP_MIN_RCVMSS);
hint = max(hint, TCP_MIN_MSS);

@@ -471,16 +469,15 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
goto new_measure;
if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
return;
- tcp_rcv_rtt_update(tp,
- jiffies - tp->rcv_rtt_est.time,
- 1);
+ tcp_rcv_rtt_update(tp, jiffies - tp->rcv_rtt_est.time, 1);

new_measure:
tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
tp->rcv_rtt_est.time = tcp_time_stamp;
}

-static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, const struct sk_buff *skb)
+static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
+ const struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
if (tp->rx_opt.rcv_tsecr &&
@@ -503,8 +500,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
goto new_measure;

time = tcp_time_stamp - tp->rcvq_space.time;
- if (time < (tp->rcv_rtt_est.rtt >> 3) ||
- tp->rcv_rtt_est.rtt == 0)
+ if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0)
return;

space = 2 * (tp->copied_seq - tp->rcvq_space.seq);
@@ -580,7 +576,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
} else {
int m = now - icsk->icsk_ack.lrcvtime;

- if (m <= TCP_ATO_MIN/2) {
+ if (m <= TCP_ATO_MIN / 2) {
/* The fastest case is the first. */
icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
} else if (m < icsk->icsk_ack.ato) {
@@ -609,7 +605,7 @@ static u32 tcp_rto_min(struct sock *sk)
u32 rto_min = TCP_RTO_MIN;

if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
- rto_min = dst->metrics[RTAX_RTO_MIN-1];
+ rto_min = dst->metrics[RTAX_RTO_MIN - 1];
return rto_min;
}

@@ -672,14 +668,14 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
}
if (after(tp->snd_una, tp->rtt_seq)) {
if (tp->mdev_max < tp->rttvar)
- tp->rttvar -= (tp->rttvar-tp->mdev_max)>>2;
+ tp->rttvar -= (tp->rttvar - tp->mdev_max) >> 2;
tp->rtt_seq = tp->snd_nxt;
tp->mdev_max = tcp_rto_min(sk);
}
} else {
/* no previous measure. */
- tp->srtt = m<<3; /* take the measured time to be rtt */
- tp->mdev = m<<1; /* make sure rto = 3*rtt */
+ tp->srtt = m << 3; /* take the measured time to be rtt */
+ tp->mdev = m << 1; /* make sure rto = 3*rtt */
tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
tp->rtt_seq = tp->snd_nxt;
}
@@ -733,7 +729,7 @@ void tcp_update_metrics(struct sock *sk)

dst_confirm(dst);

- if (dst && (dst->flags&DST_HOST)) {
+ if (dst && (dst->flags & DST_HOST)) {
const struct inet_connection_sock *icsk = inet_csk(sk);
int m;

@@ -743,7 +739,7 @@ void tcp_update_metrics(struct sock *sk)
 * Reset our results.
 */
if (!(dst_metric_locked(dst, RTAX_RTT)))
- dst->metrics[RTAX_RTT-1] = 0;
+ dst->metrics[RTAX_RTT - 1] = 0;
return;
}

@@ -755,9 +751,9 @@ void tcp_update_metrics(struct sock *sk)
 */
if (!(dst_metric_locked(dst, RTAX_RTT))) {
if (m <= 0)
- dst->metrics[RTAX_RTT-1] = tp->srtt;
+ dst->metrics[RTAX_RTT - 1] = tp->srtt;
else
- dst->metrics[RTAX_RTT-1] -= (m>>3);
+ dst->metrics[RTAX_RTT - 1] -= (m >> 3);
}

if (!(dst_metric_locked(dst, RTAX_RTTVAR))) {
@@ -770,7 +766,7 @@ void tcp_update_metrics(struct sock *sk)
m = tp->mdev;

if (m >= dst_metric(dst, RTAX_RTTVAR))
- dst->metrics[RTAX_RTTVAR-1] = m;
+ dst->metrics[RTAX_RTTVAR - 1] = m;
else
dst->metrics[RTAX_RTTVAR-1] -=
(dst->metrics[RTAX_RTTVAR-1] - m)>>2;
@@ -784,7 +780,7 @@ void tcp_update_metrics(struct sock *sk)
dst->metrics[RTAX_SSTHRESH-1] = tp->snd_cwnd >> 1;
if (!dst_metric_locked(dst, RTAX_CWND) &&
tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
- dst->metrics[RTAX_CWND-1] = tp->snd_cwnd;
+ dst->metrics[RTAX_CWND - 1] = tp->snd_cwnd;
} else if (tp->snd_cwnd > tp->snd_ssthresh &&
icsk->icsk_ca_state == TCP_CA_Open) {
/* Cong. avoidance phase, cwnd is reliable. */
@@ -1353,12 +1349,14 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
}

if (in_sack <= 0)
- in_sack = tcp_match_skb_to_sack(sk, skb, start_seq, end_seq);
+ in_sack = tcp_match_skb_to_sack(sk, skb, start_seq,
+ end_seq);
if (unlikely(in_sack < 0))
break;

if (in_sack)
- *flag |= tcp_sacktag_one(skb, sk, reord, dup_sack, *fack_count);
+ *flag |= tcp_sacktag_one(skb, sk, reord, dup_sack,
+ *fack_count);

*fack_count += tcp_skb_pcount(skb);
}
@@ -1407,7 +1405,8 @@ static int tcp_sack_cache_ok(struct tcp_sock *tp, struct tcp_sack_block *cache)
}

static int
-tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una)
+tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
+ u32 prior_snd_una)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
@@ -1417,7 +1416,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
struct tcp_sack_block sp[4];
struct tcp_sack_block *cache;
struct sk_buff *skb;
- int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3;
+ int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE) >> 3;
int used_sacks;
int reord = tp->packets_out;
int flag = 0;
@@ -1484,17 +1483,17 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_

/* order SACK blocks to allow in order walk of the retrans queue */
for (i = used_sacks - 1; i > 0; i--) {
- for (j = 0; j < i; j++){
- if (after(sp[j].start_seq, sp[j+1].start_seq)) {
+ for (j = 0; j < i; j++) {
+ if (after(sp[j].start_seq, sp[j + 1].start_seq)) {
struct tcp_sack_block tmp;

tmp = sp[j];
- sp[j] = sp[j+1];
- sp[j+1] = tmp;
+ sp[j] = sp[j + 1];
+ sp[j + 1] = tmp;

/* Track where the first SACK block goes to */
if (j == first_sack_index)
- first_sack_index = j+1;
+ first_sack_index = j + 1;
}
}
}
@@ -1539,17 +1538,21 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
/* Head todo? */
if (before(start_seq, cache->start_seq)) {
skb = tcp_sacktag_skip(skb, sk, start_seq);
- skb = tcp_sacktag_walk(skb, sk, next_dup, start_seq,
- cache->start_seq, dup_sack,
- &fack_count, &reord, &flag);
+ skb = tcp_sacktag_walk(skb, sk, next_dup,
+ start_seq,
+ cache->start_seq,
+ dup_sack, &fack_count,
+ &reord, &flag);
}

/* Rest of the block already fully processed? */
if (!after(end_seq, cache->end_seq))
goto advance_sp;

- skb = tcp_maybe_skipping_dsack(skb, sk, next_dup, cache->end_seq,
- &fack_count, &reord, &flag);
+ skb = tcp_maybe_skipping_dsack(skb, sk, next_dup,
+ cache->end_seq,
+ &fack_count, &reord,
+ &flag);

/* ...tail remains todo... */
if (tcp_highest_sack_seq(tp) == cache->end_seq) {
@@ -1654,10 +1657,10 @@ static void tcp_remove_reno_sacks(struct sock *sk, int acked)

if (acked > 0) {
/* One ACK acked hole. The rest eat duplicate ACKs. */
- if (acked-1 >= tp->sacked_out)
+ if (acked - 1 >= tp->sacked_out)
tp->sacked_out = 0;
else
- tp->sacked_out -= acked-1;
+ tp->sacked_out -= acked - 1;
}
tcp_check_reno_reordering(sk, acked);
tcp_verify_left_out(tp);
@@ -1691,10 +1694,10 @@ int tcp_use_frto(struct sock *sk)
tcp_for_write_queue_from(skb, sk) {
if (skb == tcp_send_head(sk))
break;
- if (TCP_SKB_CB(skb)->sacked&TCPCB_RETRANS)
+ if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
return 0;
/* Short-circuit when first non-SACKed skb has been checked */
- if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED))
+ if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
break;
}
return 1;
@@ -1804,7 +1807,7 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
 * Count the retransmission made on RTO correctly (only when
 * waiting for the first ACK and did not get it)...
 */
- if ((tp->frto_counter == 1) && !(flag&FLAG_DATA_ACKED)) {
+ if ((tp->frto_counter == 1) && !(flag & FLAG_DATA_ACKED)) {
/* For some reason this R-bit might get cleared? */
if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
tp->retrans_out += tcp_skb_pcount(skb);
@@ -1817,7 +1820,7 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
}

/* Don't lost mark skbs that were fwd transmitted after RTO */
- if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) &&
+ if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) &&
!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark)) {
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
tp->lost_out += tcp_skb_pcount(skb);
@@ -1832,7 +1835,7 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
tp->bytes_acked = 0;

tp->reordering = min_t(unsigned int, tp->reordering,
- sysctl_tcp_reordering);
+ sysctl_tcp_reordering);
tcp_set_ca_state(sk, TCP_CA_Loss);
tp->high_seq = tp->frto_highmark;
TCP_ECN_queue_cwr(tp);
@@ -1899,7 +1902,7 @@ void tcp_enter_loss(struct sock *sk, int how)
if (skb == tcp_send_head(sk))
break;

- if (TCP_SKB_CB(skb)->sacked&TCPCB_RETRANS)
+ if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
tp->undo_marker = 0;
TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED;
if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || how) {
@@ -1911,7 +1914,7 @@ void tcp_enter_loss(struct sock *sk, int how)
tcp_verify_left_out(tp);

tp->reordering = min_t(unsigned int, tp->reordering,
- sysctl_tcp_reordering);
+ sysctl_tcp_reordering);
tcp_set_ca_state(sk, TCP_CA_Loss);
tp->high_seq = tp->snd_nxt;
TCP_ECN_queue_cwr(tp);
@@ -1943,7 +1946,7 @@ static int tcp_check_sack_reneging(struct sock *sk, int flag)

static inline int tcp_fackets_out(struct tcp_sock *tp)
{
- return tcp_is_reno(tp) ? tp->sacked_out+1 : tp->fackets_out;
+ return tcp_is_reno(tp) ? tp->sacked_out + 1 : tp->fackets_out;
}

/* Heurestics to calculate number of duplicate ACKs. There's no dupACKs
@@ -2116,12 +2119,11 @@ static int tcp_time_to_recover(struct sock *sk)
 * retransmitted past LOST markings in the first place? I'm not fully sure
 * about undo and end of connection cases, which can cause R without L?
 */
-static void tcp_verify_retransmit_hint(struct tcp_sock *tp,
- struct sk_buff *skb)
+static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
{
if ((tp->retransmit_skb_hint != NULL) &&
before(TCP_SKB_CB(skb)->seq,
- TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
+ TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
tp->retransmit_skb_hint = NULL;
}

@@ -2156,7 +2158,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int fast_rexmit)
cnt += tcp_skb_pcount(skb);

if (((!fast_rexmit || (tp->lost_out > 0)) && (cnt > packets)) ||
- after(TCP_SKB_CB(skb)->end_seq, tp->high_seq))
+ after(TCP_SKB_CB(skb)->end_seq, tp->high_seq))
break;
if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_SACKED_ACKED|TCPCB_LOST))) {
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
@@ -2223,7 +2225,7 @@ static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
{
tp->snd_cwnd = min(tp->snd_cwnd,
- tcp_packets_in_flight(tp)+tcp_max_burst(tp));
+ tcp_packets_in_flight(tp) + tcp_max_burst(tp));
tp->snd_cwnd_stamp = tcp_time_stamp;
}

@@ -2243,15 +2245,15 @@ static void tcp_cwnd_down(struct sock *sk, int flag)
struct tcp_sock *tp = tcp_sk(sk);
int decr = tp->snd_cwnd_cnt + 1;

- if ((flag&(FLAG_ANY_PROGRESS|FLAG_DSACKING_ACK)) ||
- (tcp_is_reno(tp) && !(flag&FLAG_NOT_DUP))) {
- tp->snd_cwnd_cnt = decr&1;
+ if ((flag & (FLAG_ANY_PROGRESS | FLAG_DSACKING_ACK)) ||
+ (tcp_is_reno(tp) && !(flag & FLAG_NOT_DUP))) {
+ tp->snd_cwnd_cnt = decr & 1;
decr >>= 1;

if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
tp->snd_cwnd -= decr;

- tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
+ tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1);
tp->snd_cwnd_stamp = tcp_time_stamp;
}
}
@@ -2295,7 +2297,7 @@ static void tcp_undo_cwr(struct sock *sk, const int undo)
if (icsk->icsk_ca_ops->undo_cwnd)
tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
else
- tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh<<1);
+ tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1);

if (undo && tp->prior_ssthresh > tp->snd_ssthresh) {
tp->snd_ssthresh = tp->prior_ssthresh;
@@ -2314,8 +2316,7 @@ static void tcp_undo_cwr(struct sock *sk, const int undo)

static inline int tcp_may_undo(struct tcp_sock *tp)
{
- return tp->undo_marker &&
- (!tp->undo_retrans || tcp_packet_delayed(tp));
+ return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
}

/* People celebrate: "We love our President!" */
@@ -2434,7 +2435,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
if (tp->retrans_out == 0)
tp->retrans_stamp = 0;

- if (flag&FLAG_ECE)
+ if (flag & FLAG_ECE)
tcp_enter_cwr(sk, 1);

if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
@@ -2480,7 +2481,6 @@ static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb)
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}

-
/* Process an event, which can update packets-in-flight not trivially.
 * Main goal of this function is to calculate new estimate for left_out,
 * taking into account both packets sitting in receiver's buffer and
@@ -2492,13 +2492,12 @@ static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb)
 * It does _not_ decide what to send, it is made in function
 * tcp_xmit_retransmit_queue().
 */
-static void
-tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
+static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- int is_dupack = !(flag&(FLAG_SND_UNA_ADVANCED|FLAG_NOT_DUP));
- int do_lost = is_dupack || ((flag&FLAG_DATA_SACKED) &&
+ int is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
+ int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
(tcp_fackets_out(tp) > tp->reordering));
int fast_rexmit = 0;

@@ -2509,7 +2508,7 @@ tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)

/* Now state machine starts.
 * A. ECE, hence prohibit cwnd undoing, the reduction is required. */
- if (flag&FLAG_ECE)
+ if (flag & FLAG_ECE)
tp->prior_ssthresh = 0;

/* B. In all the states check for reneging SACKs. */
@@ -2521,7 +2520,7 @@ tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
before(tp->snd_una, tp->high_seq) &&
icsk->icsk_ca_state != TCP_CA_Open &&
tp->fackets_out > tp->reordering) {
- tcp_mark_head_lost(sk, tp->fackets_out-tp->reordering, 0);
+ tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering, 0);
NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
}

@@ -2581,7 +2580,7 @@ tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
do_lost = tcp_try_undo_partial(sk, pkts_acked);
break;
case TCP_CA_Loss:
- if (flag&FLAG_DATA_ACKED)
+ if (flag & FLAG_DATA_ACKED)
icsk->icsk_retransmits = 0;
if (!tcp_try_undo_loss(sk)) {
tcp_moderate_cwnd(tp);
@@ -2631,7 +2630,7 @@ tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
tp->undo_retrans = tp->retrans_out;

if (icsk->icsk_ca_state < TCP_CA_CWR) {
- if (!(flag&FLAG_ECE))
+ if (!(flag & FLAG_ECE))
tp->prior_ssthresh = tcp_current_ssthresh(sk);
tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
TCP_ECN_queue_cwr(tp);
@@ -2725,7 +2724,8 @@ static void tcp_rearm_rto(struct sock *sk)
if (!tp->packets_out) {
inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
} else {
- inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+ inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
}
}

@@ -2803,8 +2803,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets)
flag |= FLAG_RETRANS_DATA_ACKED;
ca_seq_rtt = -1;
seq_rtt = -1;
- if ((flag & FLAG_DATA_ACKED) ||
- (acked_pcount > 1))
+ if ((flag & FLAG_DATA_ACKED) || (acked_pcount > 1))
flag |= FLAG_NONHEAD_RETRANS_ACKED;
} else {
ca_seq_rtt = now - scb->when;
@@ -2950,8 +2949,9 @@ static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag)
/* Check that window update is acceptable.
 * The function assumes that snd_una<=ack<=snd_next.
 */
-static inline int tcp_may_update_window(const struct tcp_sock *tp, const u32 ack,
- const u32 ack_seq, const u32 nwin)
+static inline int tcp_may_update_window(const struct tcp_sock *tp,
+ const u32 ack, const u32 ack_seq,
+ const u32 nwin)
{
return (after(ack, tp->snd_una) ||
after(ack_seq, tp->snd_wl1) ||
@@ -3020,7 +3020,7 @@ static void tcp_ratehalving_spur_to_response(struct sock *sk)

static void tcp_undo_spur_to_response(struct sock *sk, int flag)
{
- if (flag&FLAG_ECE)
+ if (flag & FLAG_ECE)
tcp_ratehalving_spur_to_response(sk);
else
tcp_undo_cwr(sk, 1);
@@ -3063,7 +3063,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
tcp_verify_left_out(tp);

/* Duplicate the behavior from Loss state (fastretrans_alert) */
- if (flag&FLAG_DATA_ACKED)
+ if (flag & FLAG_DATA_ACKED)
inet_csk(sk)->icsk_retransmits = 0;

if ((flag & FLAG_NONHEAD_RETRANS_ACKED) ||
@@ -3080,16 +3080,16 @@ static int tcp_process_frto(struct sock *sk, int flag)
 * ACK isn't duplicate nor advances window, e.g., opposite dir
 * data, winupdate
 */
- if (!(flag&FLAG_ANY_PROGRESS) && (flag&FLAG_NOT_DUP))
+ if (!(flag & FLAG_ANY_PROGRESS) && (flag & FLAG_NOT_DUP))
return 1;

- if (!(flag&FLAG_DATA_ACKED)) {
+ if (!(flag & FLAG_DATA_ACKED)) {
tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 0 : 3),
flag);
return 1;
}
} else {
- if (!(flag&FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
+ if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
/* Prevent sending of new data. */
tp->snd_cwnd = min(tp->snd_cwnd,
tcp_packets_in_flight(tp));
@@ -3097,10 +3097,12 @@ static int tcp_process_frto(struct sock *sk, int flag)
}

if ((tp->frto_counter >= 2) &&
- (!(flag&FLAG_FORWARD_PROGRESS) ||
- ((flag&FLAG_DATA_SACKED) && !(flag&FLAG_ONLY_ORIG_SACKED)))) {
+ (!(flag & FLAG_FORWARD_PROGRESS) ||
+ ((flag & FLAG_DATA_SACKED) &&
+ !(flag & FLAG_ONLY_ORIG_SACKED)))) {
/* RFC4138 shortcoming (see comment above) */
- if (!(flag&FLAG_FORWARD_PROGRESS) && (flag&FLAG_NOT_DUP))
+ if (!(flag & FLAG_FORWARD_PROGRESS) &&
+ (flag & FLAG_NOT_DUP))
return 1;

tcp_enter_frto_loss(sk, 3, flag);
@@ -3166,13 +3168,14 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
tp->bytes_acked += ack - prior_snd_una;
else if (icsk->icsk_ca_state == TCP_CA_Loss)
/* we assume just one segment left network */
- tp->bytes_acked += min(ack - prior_snd_una, tp->mss_cache);
+ tp->bytes_acked += min(ack - prior_snd_una,
+ tp->mss_cache);
}

prior_fackets = tp->fackets_out;
prior_in_flight = tcp_packets_in_flight(tp);

- if (!(flag&FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
+ if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
/* Window is constant, pure forward advance.
 * No more checks are required.
 * Note, we use the fact that SND.UNA>=SND.WL2.
@@ -3224,13 +3227,14 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
tcp_may_raise_cwnd(sk, flag))
tcp_cong_avoid(sk, ack, prior_in_flight);
- tcp_fastretrans_alert(sk, prior_packets - tp->packets_out, flag);
+ tcp_fastretrans_alert(sk, prior_packets - tp->packets_out,
+ flag);
} else {
if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
tcp_cong_avoid(sk, ack, prior_in_flight);
}

- if ((flag & FLAG_FORWARD_PROGRESS) || !(flag&FLAG_NOT_DUP))
+ if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
dst_confirm(sk->sk_dst_cache);

return 1;
@@ -3255,22 +3259,22 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
return 0;
}

-
/* Look for tcp options. Normally only called on SYN and SYNACK packets.
 * But, this can also be called on packets in the established flow when
 * the fast version below fails.
 */
-void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, int estab)
+void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
+ int estab)
{
unsigned char *ptr;
struct tcphdr *th = tcp_hdr(skb);
- int length=(th->doff*4)-sizeof(struct tcphdr);
+ int length = (th->doff * 4) - sizeof(struct tcphdr);

ptr = (unsigned char *)(th + 1);
opt_rx->saw_tstamp = 0;

while (length > 0) {
- int opcode=*ptr++;
+ int opcode = *ptr++;
int opsize;

switch (opcode) {
@@ -3359,7 +3363,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
struct tcp_sock *tp)
{
- if (th->doff == sizeof(struct tcphdr)>>2) {
+ if (th->doff == sizeof(struct tcphdr) >> 2) {
tp->rx_opt.saw_tstamp = 0;
return 0;
} else if (tp->rx_opt.tstamp_ok &&
@@ -3444,7 +3448,8 @@ static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
(s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ);
}

-static inline int tcp_paws_discard(const struct sock *sk, const struct sk_buff *skb)
+static inline int tcp_paws_discard(const struct sock *sk,
+ const struct sk_buff *skb)
{
const struct tcp_sock *tp = tcp_sk(sk);
return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW &&
@@ -3476,16 +3481,16 @@ static void tcp_reset(struct sock *sk)
{
/* We want the right error as BSD sees it (and indeed as we do). */
switch (sk->sk_state) {
- case TCP_SYN_SENT:
- sk->sk_err = ECONNREFUSED;
- break;
- case TCP_CLOSE_WAIT:
- sk->sk_err = EPIPE;
- break;
- case TCP_CLOSE:
- return;
- default:
- sk->sk_err = ECONNRESET;
+ case TCP_SYN_SENT:
+ sk->sk_err = ECONNREFUSED;
+ break;
+ case TCP_CLOSE_WAIT:
+ sk->sk_err = EPIPE;
+ break;
+ case TCP_CLOSE:
+ return;
+ default:
+ sk->sk_err = ECONNRESET;
}

if (!sock_flag(sk, SOCK_DEAD))
@@ -3518,43 +3523,43 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
sock_set_flag(sk, SOCK_DONE);

switch (sk->sk_state) {
- case TCP_SYN_RECV:
- case TCP_ESTABLISHED:
- /* Move to CLOSE_WAIT */
- tcp_set_state(sk, TCP_CLOSE_WAIT);
- inet_csk(sk)->icsk_ack.pingpong = 1;
- break;
+ case TCP_SYN_RECV:
+ case TCP_ESTABLISHED:
+ /* Move to CLOSE_WAIT */
+ tcp_set_state(sk, TCP_CLOSE_WAIT);
+ inet_csk(sk)->icsk_ack.pingpong = 1;
+ break;

- case TCP_CLOSE_WAIT:
- case TCP_CLOSING:
- /* Received a retransmission of the FIN, do
- * nothing.
- */
- break;
- case TCP_LAST_ACK:
- /* RFC793: Remain in the LAST-ACK state. */
- break;
+ case TCP_CLOSE_WAIT:
+ case TCP_CLOSING:
+ /* Received a retransmission of the FIN, do
+ * nothing.
+ */
+ break;
+ case TCP_LAST_ACK:
+ /* RFC793: Remain in the LAST-ACK state. */
+ break;

- case TCP_FIN_WAIT1:
- /* This case occurs when a simultaneous close
- * happens, we must ack the received FIN and
- * enter the CLOSING state.
- */
- tcp_send_ack(sk);
- tcp_set_state(sk, TCP_CLOSING);
- break;
- case TCP_FIN_WAIT2:
- /* Received a FIN -- send ACK and enter TIME_WAIT. */
- tcp_send_ack(sk);
- tcp_time_wait(sk, TCP_TIME_WAIT, 0);
- break;
- default:
- /* Only TCP_LISTEN and TCP_CLOSE are left, in these
- * cases we should never reach this piece of code.
- */
- printk(KERN_ERR "%s: Impossible, sk->sk_state=%d\n",
- __FUNCTION__, sk->sk_state);
- break;
+ case TCP_FIN_WAIT1:
+ /* This case occurs when a simultaneous close
+ * happens, we must ack the received FIN and
+ * enter the CLOSING state.
+ */
+ tcp_send_ack(sk);
+ tcp_set_state(sk, TCP_CLOSING);
+ break;
+ case TCP_FIN_WAIT2:
+ /* Received a FIN -- send ACK and enter TIME_WAIT. */
+ tcp_send_ack(sk);
+ tcp_time_wait(sk, TCP_TIME_WAIT, 0);
+ break;
+ default:
+ /* Only TCP_LISTEN and TCP_CLOSE are left, in these
+ * cases we should never reach this piece of code.
+ */
+ printk(KERN_ERR "%s: Impossible, sk->sk_state=%d\n",
+ __FUNCTION__, sk->sk_state);
+ break;
}

/* It _is_ possible, that we have something out-of-order _after_ FIN.
@@ -3577,7 +3582,8 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
}
}

-static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq)
+static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
+ u32 end_seq)
{
if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) {
if (before(seq, sp->start_seq))
@@ -3600,7 +3606,8 @@ static void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq)
tp->rx_opt.dsack = 1;
tp->duplicate_sack[0].start_seq = seq;
tp->duplicate_sack[0].end_seq = end_seq;
- tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + 1, 4 - tp->rx_opt.tstamp_ok);
+ tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + 1,
+ 4 - tp->rx_opt.tstamp_ok);
}
}

@@ -3640,12 +3647,12 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
{
int this_sack;
struct tcp_sack_block *sp = &tp->selective_acks[0];
- struct tcp_sack_block *swalk = sp+1;
+ struct tcp_sack_block *swalk = sp + 1;

/* See if the recent change to the first SACK eats into
 * or hits the sequence space of other SACK blocks, if so coalesce.
 */
- for (this_sack = 1; this_sack < tp->rx_opt.num_sacks; ) {
+ for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;) {
if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) {
int i;

@@ -3653,16 +3660,19 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
 * Decrease num_sacks.
 */
tp->rx_opt.num_sacks--;
- tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
- for (i=this_sack; i < tp->rx_opt.num_sacks; i++)
- sp[i] = sp[i+1];
+ tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks +
+ tp->rx_opt.dsack,
+ 4 - tp->rx_opt.tstamp_ok);
+ for (i = this_sack; i < tp->rx_opt.num_sacks; i++)
+ sp[i] = sp[i + 1];
continue;
}
this_sack++, swalk++;
}
}

-static inline void tcp_sack_swap(struct tcp_sack_block *sack1, struct tcp_sack_block *sack2)
+static inline void tcp_sack_swap(struct tcp_sack_block *sack1,
+ struct tcp_sack_block *sack2)
{
__u32 tmp;

@@ -3685,11 +3695,11 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
if (!cur_sacks)
goto new_sack;

- for (this_sack=0; this_sack<cur_sacks; this_sack++, sp++) {
+ for (this_sack = 0; this_sack < cur_sacks; this_sack++, sp++) {
if (tcp_sack_extend(sp, seq, end_seq)) {
/* Rotate this_sack to the first one. */
- for (; this_sack>0; this_sack--, sp--)
- tcp_sack_swap(sp, sp-1);
+ for (; this_sack > 0; this_sack--, sp--)
+ tcp_sack_swap(sp, sp - 1);
if (cur_sacks > 1)
tcp_sack_maybe_coalesce(tp);
return;
@@ -3708,14 +3718,15 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
sp--;
}
for (; this_sack > 0; this_sack--, sp--)
- *sp = *(sp-1);
+ *sp = *(sp - 1);

new_sack:
/* Build the new head SACK, and we're done. */
sp->start_seq = seq;
sp->end_seq = end_seq;
tp->rx_opt.num_sacks++;
- tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
+ tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack,
+ 4 - tp->rx_opt.tstamp_ok);
}

/* RCV.NXT advances, some SACKs should be eaten. */
@@ -3733,7 +3744,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
return;
}

- for (this_sack = 0; this_sack < num_sacks; ) {
+ for (this_sack = 0; this_sack < num_sacks;) {
/* Check if the start of the sack is covered by RCV.NXT. */
if (!before(tp->rcv_nxt, sp->start_seq)) {
int i;
@@ -3752,7 +3763,9 @@ static void tcp_sack_remove(struct tcp_sock *tp)
}
if (num_sacks != tp->rx_opt.num_sacks) {
tp->rx_opt.num_sacks = num_sacks;
- tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
+ tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks +
+ tp->rx_opt.dsack,
+ 4 - tp->rx_opt.tstamp_ok);
}
}

@@ -3805,14 +3818,14 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
goto drop;

- __skb_pull(skb, th->doff*4);
+ __skb_pull(skb, th->doff * 4);

TCP_ECN_accept_cwr(tp, skb);

if (tp->rx_opt.dsack) {
tp->rx_opt.dsack = 0;
tp->rx_opt.eff_sacks = min_t(unsigned int, tp->rx_opt.num_sacks,
- 4 - tp->rx_opt.tstamp_ok);
+ 4 - tp->rx_opt.tstamp_ok);
}

/* Queue data for delivery to the user.
@@ -3828,7 +3841,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
tp->copied_seq == tp->rcv_nxt && tp->ucopy.len &&
sock_owned_by_user(sk) && !tp->urg_data) {
int chunk = min_t(unsigned int, skb->len,
- tp->ucopy.len);
+ tp->ucopy.len);

__set_current_state(TASK_RUNNING);

@@ -3945,7 +3958,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
tp->selective_acks[0].end_seq =
TCP_SKB_CB(skb)->end_seq;
}
- __skb_queue_head(&tp->out_of_order_queue,skb);
+ __skb_queue_head(&tp->out_of_order_queue, skb);
} else {
struct sk_buff *skb1 = tp->out_of_order_queue.prev;
u32 seq = TCP_SKB_CB(skb)->seq;
@@ -3968,10 +3981,10 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
if (!after(TCP_SKB_CB(skb1)->seq, seq))
break;
} while ((skb1 = skb1->prev) !=
- (struct sk_buff*)&tp->out_of_order_queue);
+ (struct sk_buff *)&tp->out_of_order_queue);

/* Do skb overlap to previous one? */
- if (skb1 != (struct sk_buff*)&tp->out_of_order_queue &&
+ if (skb1 != (struct sk_buff *)&tp->out_of_order_queue &&
before(seq, TCP_SKB_CB(skb1)->end_seq)) {
if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
/* All the bits are present. Drop. */
@@ -3981,7 +3994,8 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
}
if (after(seq, TCP_SKB_CB(skb1)->seq)) {
/* Partial overlap. */
- tcp_dsack_set(tp, seq, TCP_SKB_CB(skb1)->end_seq);
+ tcp_dsack_set(tp, seq,
+ TCP_SKB_CB(skb1)->end_seq);
} else {
skb1 = skb1->prev;
}
@@ -3990,15 +4004,17 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)

/* And clean segments covered by new one as whole. */
while ((skb1 = skb->next) !=
- (struct sk_buff*)&tp->out_of_order_queue &&
+ (struct sk_buff *)&tp->out_of_order_queue &&
after(end_seq, TCP_SKB_CB(skb1)->seq)) {
- if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
- tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, end_seq);
- break;
- }
- __skb_unlink(skb1, &tp->out_of_order_queue);
- tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq);
- __kfree_skb(skb1);
+ if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
+ tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq,
+ end_seq);
+ break;
+ }
+ __skb_unlink(skb1, &tp->out_of_order_queue);
+ tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq,
+ TCP_SKB_CB(skb1)->end_seq);
+ __kfree_skb(skb1);
}

add_sack:
@@ -4021,7 +4037,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,

/* First, check that queue is collapsible and find
 * the point where collapsing can be useful. */
- for (skb = head; skb != tail; ) {
+ for (skb = head; skb != tail;) {
/* No new bits? It is possible on ofo queue. */
if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
struct sk_buff *next = skb->next;
@@ -4059,9 +4075,9 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
/* Too big header? This can happen with IPv6. */
if (copy < 0)
return;
- if (end-start < copy)
- copy = end-start;
- nskb = alloc_skb(copy+header, GFP_ATOMIC);
+ if (end - start < copy)
+ copy = end - start;
+ nskb = alloc_skb(copy + header, GFP_ATOMIC);
if (!nskb)
return;

@@ -4171,7 +4187,7 @@ static int tcp_prune_queue(struct sock *sk)
tcp_collapse_ofo_queue(sk);
tcp_collapse(sk, &sk->sk_receive_queue,
sk->sk_receive_queue.next,
- (struct sk_buff*)&sk->sk_receive_queue,
+ (struct sk_buff *)&sk->sk_receive_queue,
tp->copied_seq, tp->rcv_nxt);
sk_mem_reclaim(sk);

@@ -4210,7 +4226,6 @@ static int tcp_prune_queue(struct sock *sk)
return -1;
}

-
/* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
 * As additional protections, we do not touch cwnd in retransmission phases,
 * and if application hit its sndbuf limit recently.
@@ -4272,8 +4287,8 @@ static void tcp_new_space(struct sock *sk)
int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
MAX_TCP_HEADER + 16 + sizeof(struct sk_buff),
demanded = max_t(unsigned int, tp->snd_cwnd,
- tp->reordering + 1);
- sndmem *= 2*demanded;
+ tp->reordering + 1);
+ sndmem *= 2 * demanded;
if (sndmem > sk->sk_sndbuf)
sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
tp->snd_cwnd_stamp = tcp_time_stamp;
@@ -4314,8 +4329,7 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
/* We ACK each frame or... */
tcp_in_quickack_mode(sk) ||
/* We have out of order data. */
- (ofo_possible &&
- skb_peek(&tp->out_of_order_queue))) {
+ (ofo_possible && skb_peek(&tp->out_of_order_queue))) {
/* Then ack it now */
tcp_send_ack(sk);
} else {
@@ -4343,7 +4357,7 @@ static inline void tcp_ack_snd_check(struct sock *sk)
 * either form (or just set the sysctl tcp_stdurg).
 */

-static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
+static void tcp_check_urg(struct sock *sk, struct tcphdr *th)
{
struct tcp_sock *tp = tcp_sk(sk);
u32 ptr = ntohs(th->urg_ptr);
@@ -4392,8 +4406,7 @@ static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
 * buggy users.
 */
if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
- !sock_flag(sk, SOCK_URGINLINE) &&
- tp->copied_seq != tp->rcv_nxt) {
+ !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) {
struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
tp->copied_seq++;
if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) {
@@ -4402,8 +4415,8 @@ static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
}
}

- tp->urg_data = TCP_URG_NOTYET;
- tp->urg_seq = ptr;
+ tp->urg_data = TCP_URG_NOTYET;
+ tp->urg_seq = ptr;

/* Disable header prediction. */
tp->pred_flags = 0;
@@ -4416,7 +4429,7 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th)

/* Check if we get a new urgent pointer - normally not. */
if (th->urg)
- tcp_check_urg(sk,th);
+ tcp_check_urg(sk, th);

/* Do we wait for any urgent data? - normally not... */
if (tp->urg_data == TCP_URG_NOTYET) {
@@ -4458,7 +4471,8 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
return err;
}

-static __sum16 __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
+static __sum16 __tcp_checksum_complete_user(struct sock *sk,
+ struct sk_buff *skb)
{
__sum16 result;

@@ -4472,14 +4486,16 @@ static __sum16 __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb
return result;
}

-static inline int tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
+static inline int tcp_checksum_complete_user(struct sock *sk,
+ struct sk_buff *skb)
{
return !skb_csum_unnecessary(skb) &&
- __tcp_checksum_complete_user(sk, skb);
+ __tcp_checksum_complete_user(sk, skb);
}

#ifdef CONFIG_NET_DMA
-static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen)
+static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
+ int hlen)
{
struct tcp_sock *tp = tcp_sk(sk);
int chunk = skb->len - hlen;
@@ -4495,7 +4511,9 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen
if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {

dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan,
- skb, hlen, tp->ucopy.iov, chunk, tp->ucopy.pinned_list);
+ skb, hlen,
+ tp->ucopy.iov, chunk,
+ tp->ucopy.pinned_list);

if (dma_cookie < 0)
goto out;
@@ -4577,7 +4595,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 */

if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags &&
- TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
+ TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
int tcp_header_len = tp->tcp_header_len;

/* Timestamp header prediction: tcp_header_len
@@ -4646,7 +4664,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
eaten = 1;
}
#endif
- if (tp->ucopy.task == current && sock_owned_by_user(sk) && !copied_early) {
+ if (tp->ucopy.task == current &&
+ sock_owned_by_user(sk) && !copied_early) {
__set_current_state(TASK_RUNNING);

if (!tcp_copy_to_iovec(sk, skb, tcp_header_len))
@@ -4693,7 +4712,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
NET_INC_STATS_BH(LINUX_MIB_TCPHPHITS);

/* Bulk data transfer: receiver */
- __skb_pull(skb,tcp_header_len);
+ __skb_pull(skb, tcp_header_len);
__skb_queue_tail(&sk->sk_receive_queue, skb);
skb_set_owner_r(skb, sk);
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
@@ -4725,7 +4744,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
}

slow_path:
- if (len < (th->doff<<2) || tcp_checksum_complete_user(sk, skb))
+ if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
goto csum_error;

/*
@@ -4975,7 +4994,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
}

/* PAWS check. */
- if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp && tcp_paws_check(&tp->rx_opt, 0))
+ if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp &&
+ tcp_paws_check(&tp->rx_opt, 0))
goto discard_and_undo;

if (th->syn) {
@@ -5010,7 +5030,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
tcp_initialize_rcv_mss(sk);

-
tcp_send_synack(sk);
#if 0
/* Note, we could accept data and URG from this segment.
@@ -5042,7 +5061,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
return 1;
}

-
/*
 * This function implements the receiving procedure of RFC 793 for
 * all states except ESTABLISHED and TIME_WAIT.
@@ -5164,7 +5182,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 */
if (sk->sk_socket)
sk_wake_async(sk,
- SOCK_WAKE_IO, POLL_OUT);
+ SOCK_WAKE_IO, POLL_OUT);

tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
tp->snd_wnd = ntohs(th->window) <<
@@ -5176,8 +5194,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 * and does not calculate rtt.
 * Fix it at least with timestamps.
 */
- if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
- !tp->srtt)
+ if (tp->rx_opt.saw_tstamp &&
+ tp->rx_opt.rcv_tsecr && !tp->srtt)
tcp_ack_saw_tstamp(sk, 0);

if (tp->rx_opt.tstamp_ok)
|