@@ -2475,7 +2475,7 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
  * losses and/or application stalls), do not perform any further cwnd
  * reductions, but instead slow start up to ssthresh.
  */
-static void tcp_init_cwnd_reduction(struct sock *sk, const bool set_ssthresh)
+static void tcp_init_cwnd_reduction(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);

@@ -2485,8 +2485,7 @@ static void tcp_init_cwnd_reduction(struct sock *sk, const bool set_ssthresh)
 	tp->prior_cwnd = tp->snd_cwnd;
 	tp->prr_delivered = 0;
 	tp->prr_out = 0;
-	if (set_ssthresh)
-		tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
+	tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
 	TCP_ECN_queue_cwr(tp);
 }
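
With the set_ssthresh flag gone, every entry into cwnd reduction now
unconditionally asks the congestion-control module for a fresh ssthresh
via icsk_ca_ops->ssthresh(). For Reno-style modules that hook implements
the classic halving rule; here is a minimal standalone sketch of the
math (illustrative only, not the in-tree function):

	#include <stdio.h>

	/* Reno-style ssthresh rule, as the icsk_ca_ops->ssthresh() hook
	 * computes it: halve the congestion window, but never drop below
	 * 2 segments. A sketch, not the kernel's exact code.
	 */
	static unsigned int reno_ssthresh(unsigned int snd_cwnd)
	{
		unsigned int half = snd_cwnd >> 1;

		return half > 2 ? half : 2;
	}

	int main(void)
	{
		/* e.g. a 10-segment window yields ssthresh = 5 on CWR entry */
		printf("cwnd=10 -> ssthresh=%u\n", reno_ssthresh(10));
		printf("cwnd=3  -> ssthresh=%u\n", reno_ssthresh(3));
		return 0;
	}
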
@@ -2528,14 +2527,14 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
 }

 /* Enter CWR state. Disable cwnd undo since congestion is proven with ECN */
-void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
+void tcp_enter_cwr(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);

 	tp->prior_ssthresh = 0;
 	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
 		tp->undo_marker = 0;
-		tcp_init_cwnd_reduction(sk, set_ssthresh);
+		tcp_init_cwnd_reduction(sk);
 		tcp_set_ca_state(sk, TCP_CA_CWR);
 	}
 }
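
Because tcp_enter_cwr() is visible outside this file, its prototype must
lose the argument as well; the matching declaration change (in
include/net/tcp.h, not part of this excerpt, sketched here for
completeness) is simply:

-void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
+void tcp_enter_cwr(struct sock *sk);
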
@@ -2564,7 +2563,7 @@ static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked)
 	tp->retrans_stamp = 0;

 	if (flag & FLAG_ECE)
-		tcp_enter_cwr(sk, 1);
+		tcp_enter_cwr(sk);

 	if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
 		tcp_try_keep_open(sk);
@@ -2670,7 +2669,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
 	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
 		if (!ece_ack)
 			tp->prior_ssthresh = tcp_current_ssthresh(sk);
-		tcp_init_cwnd_reduction(sk, true);
+		tcp_init_cwnd_reduction(sk);
 	}
 	tcp_set_ca_state(sk, TCP_CA_Recovery);
 }
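
For context on the !ece_ack branch above: prior_ssthresh is saved only
for loss-triggered recovery, because ECN-signaled congestion is proven
and must not be undone (see the comment on tcp_enter_cwr()). The helper
being saved lives in include/net/tcp.h and reads roughly as follows
(quoted from memory, so treat it as a sketch):

	static inline __u32 tcp_current_ssthresh(const struct sock *sk)
	{
		const struct tcp_sock *tp = tcp_sk(sk);

		if (tcp_in_cwnd_reduction(sk))
			return tp->snd_ssthresh;

		/* outside CWR/Recovery, report at least 3/4 of cwnd so a
		 * later undo does not restore an overly small ssthresh */
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) + (tp->snd_cwnd >> 2)));
	}
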
@@ -3346,7 +3345,7 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
 	tp->tlp_high_seq = 0;
 	/* Don't reduce cwnd if DSACK arrives for TLP retrans. */
 	if (!(flag & FLAG_DSACKING_ACK)) {
-		tcp_init_cwnd_reduction(sk, true);
+		tcp_init_cwnd_reduction(sk);
 		tcp_set_ca_state(sk, TCP_CA_CWR);
 		tcp_end_cwnd_reduction(sk);
 		tcp_try_keep_open(sk);
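
Net effect of the TLP branch above: calling tcp_init_cwnd_reduction()
and tcp_end_cwnd_reduction() back to back performs a one-shot reduction,
picking a new ssthresh and immediately clamping cwnd to it. A standalone
model of that sequence, assuming a Reno-style halving ssthresh (toy_tp
and one_shot_reduction are illustrative names, not kernel code):

	#include <stdio.h>

	struct toy_tp {
		unsigned int snd_cwnd;
		unsigned int snd_ssthresh;
	};

	static void one_shot_reduction(struct toy_tp *tp)
	{
		unsigned int half = tp->snd_cwnd >> 1;

		/* init step: new ssthresh from the CC module (Reno halving) */
		tp->snd_ssthresh = half > 2 ? half : 2;
		/* end step: in CWR state, cwnd is reset to ssthresh */
		tp->snd_cwnd = tp->snd_ssthresh;
	}

	int main(void)
	{
		struct toy_tp tp = { .snd_cwnd = 20 };

		one_shot_reduction(&tp);
		/* prints: cwnd=10 ssthresh=10 */
		printf("cwnd=%u ssthresh=%u\n", tp.snd_cwnd, tp.snd_ssthresh);
		return 0;
	}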