@@ -2476,15 +2476,14 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
 	return false;
 }
 
-/* The cwnd reduction in CWR and Recovery use the PRR algorithm
- * https://datatracker.ietf.org/doc/draft-ietf-tcpm-proportional-rate-reduction/
+/* The cwnd reduction in CWR and Recovery uses the PRR algorithm in RFC 6937.
  * It computes the number of packets to send (sndcnt) based on packets newly
  * delivered:
  *   1) If the packets in flight is larger than ssthresh, PRR spreads the
  *	cwnd reductions across a full RTT.
- *   2) If packets in flight is lower than ssthresh (such as due to excess
- *	losses and/or application stalls), do not perform any further cwnd
- *	reductions, but instead slow start up to ssthresh.
+ *   2) Otherwise PRR uses packet conservation to send as much as delivered.
+ *      But when the retransmits are acked without further losses, PRR
+ *      slow starts cwnd up to ssthresh to speed up the recovery.
  */
 static void tcp_init_cwnd_reduction(struct sock *sk)
 {
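
A worked instance of mode 1 above, with illustrative numbers that are not taken from the patch: suppose recovery starts with prior_cwnd = 20 and snd_ssthresh = 10. The proportional branch computes sndcnt = ceil(snd_ssthresh * prr_delivered / prior_cwnd) - prr_out (the "+ tp->prior_cwnd - 1" in the dividend is what makes the division round up), so roughly one new packet is released for every two delivered. By the time a full prior_cwnd worth of data (20 packets) has been delivered, only about ssthresh (10) packets have gone out, so the flight size glides from 20 down to 10 over about one round trip instead of collapsing there in a single step.
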
@@ -2501,7 +2500,7 @@ static void tcp_init_cwnd_reduction(struct sock *sk)
 }
 
 static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked,
-			       int fast_rexmit)
+			       int fast_rexmit, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	int sndcnt = 0;
@@ -2510,16 +2509,18 @@ static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked,
 				 (tp->packets_out - tp->sacked_out);
 
 	tp->prr_delivered += newly_acked_sacked;
-	if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
+	if (delta < 0) {
 		u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
 			       tp->prior_cwnd - 1;
 		sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
-	} else {
+	} else if ((flag & FLAG_RETRANS_DATA_ACKED) &&
+		   !(flag & FLAG_LOST_RETRANS)) {
 		sndcnt = min_t(int, delta,
 			       max_t(int, tp->prr_delivered - tp->prr_out,
 			             newly_acked_sacked) + 1);
+	} else {
+		sndcnt = min(delta, newly_acked_sacked);
 	}
-
 	sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
 	tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
 }
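
To make the new three-way decision easier to follow, below is a minimal user-space sketch of the sndcnt computation in this hunk. It only returns sndcnt and omits the snd_cwnd update; struct prr_state, the imin/imax helpers, the flag bit values and the numbers in main() are simplified stand-ins for illustration, not the kernel's actual definitions. The middle branch slow starts only when retransmitted data has been cumulatively acked and no retransmit has been marked lost again; otherwise the new final branch stays in packet-conserving mode, which is the behavioral change this patch introduces.

#include <stdio.h>
#include <stdint.h>

#define FLAG_RETRANS_DATA_ACKED	0x01	/* stand-in for the kernel's flag bit */
#define FLAG_LOST_RETRANS	0x02	/* stand-in for the kernel's flag bit */

struct prr_state {
	int prior_cwnd;		/* cwnd when the reduction started */
	int snd_ssthresh;	/* target cwnd at the end of recovery */
	int packets_out;
	int sacked_out;
	int lost_out;
	int retrans_out;
	int prr_delivered;	/* packets delivered since recovery started */
	int prr_out;		/* packets sent out since recovery started */
};

static int imin(int a, int b) { return a < b ? a : b; }
static int imax(int a, int b) { return a > b ? a : b; }

static int packets_in_flight(const struct prr_state *tp)
{
	return tp->packets_out - (tp->sacked_out + tp->lost_out) + tp->retrans_out;
}

/* Mirrors the post-patch sndcnt decision in tcp_cwnd_reduction(). */
static int prr_sndcnt(struct prr_state *tp, int newly_acked_sacked,
		      int fast_rexmit, int flag)
{
	int sndcnt = 0;
	int delta = tp->snd_ssthresh - packets_in_flight(tp);

	tp->prr_delivered += newly_acked_sacked;
	if (delta < 0) {
		/* Flight still above ssthresh: proportional rate reduction;
		 * adding prior_cwnd - 1 makes the division round up. */
		uint64_t dividend = (uint64_t)tp->snd_ssthresh * tp->prr_delivered +
				    tp->prior_cwnd - 1;
		sndcnt = (int)(dividend / tp->prior_cwnd) - tp->prr_out;
	} else if ((flag & FLAG_RETRANS_DATA_ACKED) &&
		   !(flag & FLAG_LOST_RETRANS)) {
		/* Retransmits are getting through and none were lost again:
		 * slow start back up toward ssthresh. */
		sndcnt = imin(delta, imax(tp->prr_delivered - tp->prr_out,
					  newly_acked_sacked) + 1);
	} else {
		/* Still in danger: packet conservation, send at most as
		 * much as was just delivered. */
		sndcnt = imin(delta, newly_acked_sacked);
	}
	return imax(sndcnt, fast_rexmit ? 1 : 0);
}

int main(void)
{
	/* Illustrative state: recovery began with cwnd 20, halved to 10;
	 * one ACK SACKs two new packets while 18 are still in flight. */
	struct prr_state tp = { .prior_cwnd = 20, .snd_ssthresh = 10,
				.packets_out = 20, .sacked_out = 2 };

	printf("proportional mode releases %d packet(s)\n",
	       prr_sndcnt(&tp, 2, 0, 0));
	return 0;
}
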
@@ -2580,7 +2581,7 @@ static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked)
 	if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
 		tcp_try_keep_open(sk);
 	} else {
-		tcp_cwnd_reduction(sk, prior_unsacked, 0);
+		tcp_cwnd_reduction(sk, prior_unsacked, 0, flag);
 	}
 }
 
@@ -2737,7 +2738,7 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
 
 /* Undo during fast recovery after partial ACK. */
 static bool tcp_try_undo_partial(struct sock *sk, const int acked,
-				 const int prior_unsacked)
+				 const int prior_unsacked, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2753,7 +2754,7 @@ static bool tcp_try_undo_partial(struct sock *sk, const int acked,
 	 * mark more packets lost or retransmit more.
 	 */
 	if (tp->retrans_out) {
-		tcp_cwnd_reduction(sk, prior_unsacked, 0);
+		tcp_cwnd_reduction(sk, prior_unsacked, 0, flag);
 		return true;
 	}
 
@@ -2840,7 +2841,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
 		if (tcp_is_reno(tp) && is_dupack)
 			tcp_add_reno_sack(sk);
 	} else {
-		if (tcp_try_undo_partial(sk, acked, prior_unsacked))
+		if (tcp_try_undo_partial(sk, acked, prior_unsacked, flag))
 			return;
 		/* Partial ACK arrived. Force fast retransmit. */
 		do_lost = tcp_is_reno(tp) ||
@@ -2891,7 +2892,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
 
 	if (do_lost)
 		tcp_update_scoreboard(sk, fast_rexmit);
-	tcp_cwnd_reduction(sk, prior_unsacked, fast_rexmit);
+	tcp_cwnd_reduction(sk, prior_unsacked, fast_rexmit, flag);
 	tcp_xmit_retransmit_queue(sk);
 }
 