@@ -1942,6 +1942,8 @@ void tcp_enter_loss(struct sock *sk)
 	if (is_reneg) {
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
 		tp->sacked_out = 0;
+		/* Mark SACK reneging until we recover from this loss event. */
+		tp->is_sack_reneg = 1;
 	}
 	tcp_clear_all_retrans_hints(tp);
 
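This first hunk sets a flag that is not declared anywhere in this excerpt. A minimal sketch of the struct tcp_sock change it implies (the field name comes from the diff itself; the one-bit width, comment, and placement are assumptions):

	/* include/linux/tcp.h -- sketch, not the verbatim upstream layout */
	struct tcp_sock {
		/* ... existing fields elided ... */
		u8	is_sack_reneg:1; /* in recovery from loss with SACK reneg? */
		/* ... */
	};

A single bit is enough here, since the flag only tracks whether the connection is still inside a loss event that began with SACK reneging.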
@@ -2365,6 +2367,7 @@ static bool tcp_try_undo_recovery(struct sock *sk)
 		return true;
 	}
 	tcp_set_ca_state(sk, TCP_CA_Open);
+	tp->is_sack_reneg = 0;
 	return false;
 }
 
@@ -2398,8 +2401,10 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
 			NET_INC_STATS(sock_net(sk),
 				      LINUX_MIB_TCPSPURIOUSRTOS);
 		inet_csk(sk)->icsk_retransmits = 0;
-		if (frto_undo || tcp_is_sack(tp))
+		if (frto_undo || tcp_is_sack(tp)) {
 			tcp_set_ca_state(sk, TCP_CA_Open);
+			tp->is_sack_reneg = 0;
+		}
 		return true;
 	}
 	return false;
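Both undo paths above clear the flag once the loss event is fully undone, so rate samples become trustworthy again from that point on. A full connection reset should presumably drop the flag as well; a plausible companion change, not shown in this excerpt and therefore an assumption, is a reset in tcp_disconnect() next to the rest of the recovery state:

	/* net/ipv4/tcp.c, tcp_disconnect() -- assumed companion reset */
	tp->is_sack_reneg = 0;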
@@ -3496,6 +3501,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	struct tcp_sacktag_state sack_state;
 	struct rate_sample rs = { .prior_delivered = 0 };
 	u32 prior_snd_una = tp->snd_una;
+	bool is_sack_reneg = tp->is_sack_reneg;
 	u32 ack_seq = TCP_SKB_CB(skb)->seq;
 	u32 ack = TCP_SKB_CB(skb)->ack_seq;
 	bool is_dupack = false;
@@ -3612,7 +3618,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
 	delivered = tp->delivered - delivered;	/* freshly ACKed or SACKed */
 	lost = tp->lost - lost;			/* freshly marked lost */
-	tcp_rate_gen(sk, delivered, lost, sack_state.rate);
+	tcp_rate_gen(sk, delivered, lost, is_sack_reneg, sack_state.rate);
 	tcp_cong_control(sk, ack, delivered, flag, sack_state.rate);
 	tcp_xmit_recovery(sk, rexmit);
 	return 1;
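Note that tcp_ack() latches tp->is_sack_reneg into a local at entry (the hunk at -3496) before the undo paths above get a chance to clear it, so the rate sampler judges the sample by the reneg state that was in effect while the sample was being taken. The new argument only pays off if tcp_rate_gen() acts on it; a minimal sketch of the consuming side, with the guard inferred from this diff rather than quoted from net/ipv4/tcp_rate.c:

	/* net/ipv4/tcp_rate.c -- sketch of how the flag could be consumed */
	void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
			  bool is_sack_reneg, struct rate_sample *rs)
	{
		/* ... delivery/interval bookkeeping elided ... */

		/* A sample that spans a SACK reneging event may count data
		 * SACKed before the reneg as delivered and so overestimate
		 * bandwidth; return an invalid sample instead.
		 */
		if (!rs->prior_mstamp || is_sack_reneg) {
			rs->delivered = -1;
			rs->interval_us = -1;
			return;
		}

		/* ... rate computation continues for valid samples ... */
	}

Invalidating the sample rather than skipping the call keeps the call site in tcp_ack() unconditional, leaving it to rate-based congestion control modules to ignore samples marked invalid.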