@@ -21,6 +21,32 @@ static bool tcp_rack_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
 	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
 }
 
+u32 tcp_rack_reo_wnd(const struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	if (!tp->rack.reord) {
+		/* If reordering has not been observed, be aggressive during
+		 * recovery or when starting recovery via the DUPACK threshold.
+		 */
+		if (inet_csk(sk)->icsk_ca_state >= TCP_CA_Recovery)
+			return 0;
+
+		if (tp->sacked_out >= tp->reordering &&
+		    !(sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_NO_DUPTHRESH))
+			return 0;
+	}
+
+	/* To be more reordering resilient, allow min_rtt/4 settling delay.
+	 * Use min_rtt instead of the smoothed RTT because reordering is
+	 * often a path property and less related to queuing or delayed ACKs.
+	 * Upon receiving DSACKs, linearly increase the window up to the
+	 * smoothed RTT.
+	 */
+	return min((tcp_min_rtt(tp) >> 2) * tp->rack.reo_wnd_steps,
+		   tp->srtt_us >> 3);
+}
+
 /* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
  *
  * Marks a packet lost, if some packet sent later has been (s)acked.
@@ -44,23 +70,11 @@ static bool tcp_rack_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
 static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	u32 min_rtt = tcp_min_rtt(tp);
 	struct sk_buff *skb, *n;
 	u32 reo_wnd;
 
 	*reo_timeout = 0;
-	/* To be more reordering resilient, allow min_rtt/4 settling delay
-	 * (lower-bounded to 1000uS). We use min_rtt instead of the smoothed
-	 * RTT because reordering is often a path property and less related
-	 * to queuing or delayed ACKs.
-	 */
-	reo_wnd = 1000;
-	if ((tp->rack.reord || inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery) &&
-	    min_rtt != ~0U) {
-		reo_wnd = max((min_rtt >> 2) * tp->rack.reo_wnd_steps, reo_wnd);
-		reo_wnd = min(reo_wnd, tp->srtt_us >> 3);
-	}
-
+	reo_wnd = tcp_rack_reo_wnd(sk);
 	list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
				 tcp_tsorted_anchor) {
 		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
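
To make the new window arithmetic concrete, here is a minimal userspace sketch of the calculation tcp_rack_reo_wnd() performs, assuming the usual kernel conventions that tp->srtt_us stores the smoothed RTT left-shifted by 3 and that tp->rack.reo_wnd_steps grows by one per DSACK round. The function name rack_reo_wnd, the helper min_u32, and the sample RTT values below are illustrative stand-ins, not kernel code:

	/* Hedged sketch: the reo_wnd arithmetic lifted out of the kernel.
	 * Compile with: cc -o reo_wnd reo_wnd.c
	 */
	#include <stdio.h>
	#include <stdint.h>
	#include <inttypes.h>

	static uint32_t min_u32(uint32_t a, uint32_t b)
	{
		return a < b ? a : b;
	}

	/* min_rtt_us ~ tcp_min_rtt(tp): path minimum RTT in microseconds
	 * srtt_us    ~ tp->srtt_us:     smoothed RTT, stored shifted left by 3
	 * steps      ~ tp->rack.reo_wnd_steps: widened by DSACKs
	 */
	static uint32_t rack_reo_wnd(uint32_t min_rtt_us, uint32_t srtt_us,
				     uint32_t steps)
	{
		/* Start at min_rtt/4, grow linearly with each DSACK step,
		 * but never beyond the smoothed RTT (srtt_us >> 3).
		 */
		return min_u32((min_rtt_us >> 2) * steps, srtt_us >> 3);
	}

	int main(void)
	{
		/* Example: 40ms min RTT, 50ms smoothed RTT (stored << 3). */
		uint32_t min_rtt = 40000, srtt = 50000 << 3;
		uint32_t steps;

		for (steps = 1; steps <= 8; steps++)
			printf("steps=%" PRIu32 " -> reo_wnd=%" PRIu32 "us\n",
			       steps, rack_reo_wnd(min_rtt, srtt, steps));
		return 0;
	}

With these sample values the window grows 10ms, 20ms, 30ms, 40ms, then clamps at 50ms: once steps * min_rtt/4 reaches the smoothed RTT, further DSACKs stop widening the window, matching the "up to the smoothed RTT" bound in the patch's comment.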