@@ -2161,8 +2161,6 @@ static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
 static bool tcp_time_to_recover(struct sock *sk, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	__u32 packets_out;
-	int tcp_reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
 
 	/* Trick#1: The loss is proven. */
 	if (tp->lost_out)
@@ -2172,19 +2170,6 @@ static bool tcp_time_to_recover(struct sock *sk, int flag)
 	if (tcp_dupack_heuristics(tp) > tp->reordering)
 		return true;
 
-	/* Trick#4: It is still not OK... But will it be useful to delay
-	 * recovery more?
-	 */
-	packets_out = tp->packets_out;
-	if (packets_out <= tp->reordering &&
-	    tp->sacked_out >= max_t(__u32, packets_out/2, tcp_reordering) &&
-	    !tcp_may_send_now(sk)) {
-		/* We have nothing to send. This connection is limited
-		 * either by receiver window or by application.
-		 */
-		return true;
-	}
-
 	/* If a thin stream is detected, retransmit after first
 	 * received dupack. Employ only if SACK is supported in order
 	 * to avoid possible corner-case series of spurious retransmissions
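
For reference, the condition being deleted can be read as a standalone predicate. The following is a minimal userspace sketch (not kernel code) of the removed "Trick#4" logic, with plain unsigned ints standing in for the tcp_sock fields and the sysctl; the helper and function names here are illustrative, not taken from the tree:

/*
 * Standalone sketch of the removed "Trick#4" predicate: force recovery
 * early when the flight is small, at least half of it (or
 * sysctl_tcp_reordering packets, whichever is larger) has been SACKed,
 * and nothing more can be sent because the flow is receiver-window or
 * application limited.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned int max_u32(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

static bool trick4_time_to_recover(unsigned int packets_out,
				   unsigned int sacked_out,
				   unsigned int reordering,
				   unsigned int sysctl_tcp_reordering,
				   bool may_send_now)
{
	return packets_out <= reordering &&
	       sacked_out >= max_u32(packets_out / 2, sysctl_tcp_reordering) &&
	       !may_send_now;
}

int main(void)
{
	/* 3 packets in flight, all SACKed, nothing left to send: fires. */
	printf("%d\n", trick4_time_to_recover(3, 3, 3, 3, false));
	/* Same flight, but new data could still be sent: does not fire. */
	printf("%d\n", trick4_time_to_recover(3, 3, 3, 3, true));
	return 0;
}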