|
@@ -1043,14 +1043,31 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
|
|
|
return tp->is_cwnd_limited;
|
|
|
}
|
|
|
|
|
|
-static inline void tcp_check_probe_timer(struct sock *sk)
|
|
|
+/* Something is really bad, we could not queue an additional packet,
|
|
|
+ * because qdisc is full or receiver sent a 0 window.
|
|
|
+ * We do not want to add fuel to the fire, or abort too early,
|
|
|
+ * so make sure the timer we arm now is at least 200ms (TCP_RTO_MIN) in the future,
|
|
|
+ * regardless of current icsk_rto value (as it could be ~2ms)
|
|
|
+ */
|
|
|
+static inline unsigned long tcp_probe0_base(const struct sock *sk)
|
|
|
{
|
|
|
- const struct tcp_sock *tp = tcp_sk(sk);
|
|
|
- const struct inet_connection_sock *icsk = inet_csk(sk);
|
|
|
+ return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
|
|
|
+}
|
|
|
|
|
|
- if (!tp->packets_out && !icsk->icsk_pending)
|
|
|
+/* Variant of inet_csk_rto_backoff() used for zero window probes */
|
|
|
+static inline unsigned long tcp_probe0_when(const struct sock *sk,
|
|
|
+ unsigned long max_when)
|
|
|
+{
|
|
|
+ u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;
|
|
|
+
|
|
|
+ return (unsigned long)min_t(u64, when, max_when);
|
|
|
+}
|
|
|
+
|
|
|
+static inline void tcp_check_probe_timer(struct sock *sk)
|
|
|
+{
|
|
|
+ if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
|
|
|
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
|
|
|
- icsk->icsk_rto, TCP_RTO_MAX);
|
|
|
+ tcp_probe0_base(sk), TCP_RTO_MAX);
|
|
|
}
|
|
|
|
|
|
static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
|