@@ -2315,6 +2315,35 @@ static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
 
 /* Undo procedures. */
 
+/* We can clear retrans_stamp when there are no retransmissions in the
+ * window. It would seem that it is trivially available for us in
+ * tp->retrans_out, however, that kind of assumption doesn't consider
+ * what will happen if errors occur when sending a retransmission for the
+ * second time. ...It could be that such a segment has only
+ * TCPCB_EVER_RETRANS set at the present time. It seems that checking
+ * the head skb is enough except for some reneging corner cases that
+ * are not worth the effort.
+ *
+ * The main reason for all this complexity is that the connection dying
+ * time now depends on the validity of retrans_stamp; in particular,
+ * successive retransmissions of a segment must not advance
+ * retrans_stamp under any conditions.
+ */
+static bool tcp_any_retrans_done(const struct sock *sk)
+{
+	const struct tcp_sock *tp = tcp_sk(sk);
+	struct sk_buff *skb;
+
+	if (tp->retrans_out)
+		return true;
+
+	skb = tcp_write_queue_head(sk);
+	if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
+		return true;
+
+	return false;
+}
+
 #if FASTRETRANS_DEBUG > 1
 static void DBGUNDO(struct sock *sk, const char *msg)
 {
@@ -2410,6 +2439,8 @@ static bool tcp_try_undo_recovery(struct sock *sk)
 		 * is ACKed. For Reno it is MUST to prevent false
 		 * fast retransmits (RFC2582). SACK TCP is safe. */
 		tcp_moderate_cwnd(tp);
+		if (!tcp_any_retrans_done(sk))
+			tp->retrans_stamp = 0;
 		return true;
 	}
 	tcp_set_ca_state(sk, TCP_CA_Open);
@@ -2430,35 +2461,6 @@ static bool tcp_try_undo_dsack(struct sock *sk)
 	return false;
 }
 
-/* We can clear retrans_stamp when there are no retransmissions in the
- * window. It would seem that it is trivially available for us in
- * tp->retrans_out, however, that kind of assumptions doesn't consider
- * what will happen if errors occur when sending retransmission for the
- * second time. ...It could the that such segment has only
- * TCPCB_EVER_RETRANS set at the present time. It seems that checking
- * the head skb is enough except for some reneging corner cases that
- * are not worth the effort.
- *
- * Main reason for all this complexity is the fact that connection dying
- * time now depends on the validity of the retrans_stamp, in particular,
- * that successive retransmissions of a segment must not advance
- * retrans_stamp under any conditions.
- */
-static bool tcp_any_retrans_done(const struct sock *sk)
-{
-	const struct tcp_sock *tp = tcp_sk(sk);
-	struct sk_buff *skb;
-
-	if (tp->retrans_out)
-		return true;
-
-	skb = tcp_write_queue_head(sk);
-	if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
-		return true;
-
-	return false;
-}
-
 /* Undo during loss recovery after partial ACK or using F-RTO. */
 static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
 {