@@ -904,8 +904,6 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 		tcp_disable_fack(tp);
 	}
 
-	if (metric > 0)
-		tcp_disable_early_retrans(tp);
 	tp->rack.reord = 1;
 }
 
@@ -2054,30 +2052,6 @@ static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
 	return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1;
 }
 
-static bool tcp_pause_early_retransmit(struct sock *sk, int flag)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-	unsigned long delay;
-
-	/* Delay early retransmit and entering fast recovery for
-	 * max(RTT/4, 2msec) unless ack has ECE mark, no RTT samples
-	 * available, or RTO is scheduled to fire first.
-	 */
-	if (sysctl_tcp_early_retrans < 2 || sysctl_tcp_early_retrans > 3 ||
-	    (flag & FLAG_ECE) || !tp->srtt_us)
-		return false;
-
-	delay = max(usecs_to_jiffies(tp->srtt_us >> 5),
-		    msecs_to_jiffies(2));
-
-	if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay)))
-		return false;
-
-	inet_csk_reset_xmit_timer(sk, ICSK_TIME_EARLY_RETRANS, delay,
-				  TCP_RTO_MAX);
-	return true;
-}
-
 /* Linux NewReno/SACK/FACK/ECN state machine.
  * --------------------------------------
  *
@@ -2221,16 +2195,6 @@ static bool tcp_time_to_recover(struct sock *sk, int flag)
 	    tcp_is_sack(tp) && !tcp_send_head(sk))
 		return true;
 
-	/* Trick#6: TCP early retransmit, per RFC5827.  To avoid spurious
-	 * retransmissions due to small network reorderings, we implement
-	 * Mitigation A.3 in the RFC and delay the retransmission for a short
-	 * interval if appropriate.
-	 */
-	if (tp->do_early_retrans && !tp->retrans_out && tp->sacked_out &&
-	    (tp->packets_out >= (tp->sacked_out + 1) && tp->packets_out < 4) &&
-	    !tcp_may_send_now(sk))
-		return !tcp_pause_early_retransmit(sk, flag);
-
 	return false;
 }
 
@@ -3050,8 +3014,7 @@ void tcp_rearm_rto(struct sock *sk)
 	} else {
 		u32 rto = inet_csk(sk)->icsk_rto;
 		/* Offset the time elapsed after installing regular RTO */
-		if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
-		    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
+		if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
 		    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
 			struct sk_buff *skb = tcp_write_queue_head(sk);
 			const u32 rto_time_stamp =
@@ -3068,24 +3031,6 @@ void tcp_rearm_rto(struct sock *sk)
 	}
 }
 
-/* This function is called when the delayed ER timer fires. TCP enters
- * fast recovery and performs fast-retransmit.
- */
-void tcp_resume_early_retransmit(struct sock *sk)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	tcp_rearm_rto(sk);
-
-	/* Stop if ER is disabled after the delayed ER timer is scheduled */
-	if (!tp->do_early_retrans)
-		return;
-
-	tcp_enter_recovery(sk, false);
-	tcp_update_scoreboard(sk, 1);
-	tcp_xmit_retransmit_queue(sk);
-}
-
 /* If we get here, the whole TSO packet has not been acked. */
 static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
 {
@@ -3651,8 +3596,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
 	skb_mstamp_get(&sack_state.ack_time);
 
-	if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
-	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
+	if (icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
 		tcp_rearm_rto(sk);
 
 	if (after(ack, prior_snd_una)) {
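
A note on the arithmetic in the removed tcp_pause_early_retransmit(): the
timer delay is max(RTT/4, 2 ms), and since tp->srtt_us stores eight times
the smoothed RTT in microseconds, "srtt_us >> 5" is RTT/4. The sketch below
is a standalone userspace illustration of just that computation, not kernel
code; the helper name er_delay_us is invented for this example.

/*
 * Illustrative only: reproduces the delay the removed code armed the
 * early-retransmit timer with, i.e. max(RTT/4, 2 ms).
 */
#include <stdio.h>

static unsigned long er_delay_us(unsigned long srtt_us)
{
	/* srtt_us holds 8 * smoothed RTT, so >> 5 yields RTT/4. */
	unsigned long quarter_rtt_us = srtt_us >> 5;
	unsigned long floor_us = 2000;	/* 2 ms lower bound */

	return quarter_rtt_us > floor_us ? quarter_rtt_us : floor_us;
}

int main(void)
{
	/* A 40 ms smoothed RTT (srtt_us = 320000) gives a 10 ms delay. */
	printf("delay = %lu us\n", er_delay_us(8 * 40000UL));
	return 0;
}

With a 40 ms smoothed RTT the timer would have been armed 10 ms out, well
inside a typical RTO, which is why the removed code also declined to arm
the timer when the RTO was scheduled to fire first.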