@@ -1917,18 +1917,43 @@ static inline void tcp_init_undo(struct tcp_sock *tp)
 	tp->undo_retrans = tp->retrans_out ? : -1;
 }
 
-/* Enter Loss state. If we detect SACK reneging, forget all SACK information
+/* If we detect SACK reneging, forget all SACK information
  * and reset tags completely, otherwise preserve SACKs. If receiver
  * dropped its ofo queue, we will know this due to reneging detection.
  */
+static void tcp_timeout_mark_lost(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct sk_buff *skb;
+	bool is_reneg;			/* is receiver reneging on SACKs? */
+
+	skb = tcp_rtx_queue_head(sk);
+	is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED);
+	if (is_reneg) {
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
+		tp->sacked_out = 0;
+		/* Mark SACK reneging until we recover from this loss event. */
+		tp->is_sack_reneg = 1;
+	} else if (tcp_is_reno(tp)) {
+		tcp_reset_reno_sack(tp);
+	}
+
+	skb_rbtree_walk_from(skb) {
+		if (is_reneg)
+			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
+		tcp_mark_skb_lost(sk, skb);
+	}
+	tcp_verify_left_out(tp);
+	tcp_clear_all_retrans_hints(tp);
+}
+
+/* Enter Loss state. */
 void tcp_enter_loss(struct sock *sk)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct net *net = sock_net(sk);
-	struct sk_buff *skb;
 	bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
-	bool is_reneg; /* is receiver reneging on SACKs? */
 
 	/* Reduce ssthresh if it has not yet been made inside this window. */
 	if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
@@ -1944,24 +1969,7 @@ void tcp_enter_loss(struct sock *sk)
 	tp->snd_cwnd_cnt = 0;
 	tp->snd_cwnd_stamp = tcp_jiffies32;
 
-	if (tcp_is_reno(tp))
-		tcp_reset_reno_sack(tp);
-
-	skb = tcp_rtx_queue_head(sk);
-	is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED);
-	if (is_reneg) {
-		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
-		tp->sacked_out = 0;
-		/* Mark SACK reneging until we recover from this loss event. */
-		tp->is_sack_reneg = 1;
-	}
-	skb_rbtree_walk_from(skb) {
-		if (is_reneg)
-			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
-		tcp_mark_skb_lost(sk, skb);
-	}
-	tcp_verify_left_out(tp);
-	tcp_clear_all_retrans_hints(tp);
+	tcp_timeout_mark_lost(sk);
 
 	/* Timeout in disordered state after receiving substantial DUPACKs
 	 * suggests that the degree of reordering is over-estimated.