
Merge branch 'tcp-tso'

Eric Dumazet says:

====================
tcp: tso improvements

This patch series reworks tcp_tso_should_defer() a bit
to get fewer bursts and better ECN behavior.

We also remove the tso_deferred field from the tcp socket.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed 10 years ago
commit f9c7ce1853
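
Two timing facts help when reading the tcp_output.c hunks below: tcp_time_stamp and tp->lsndtime are 32-bit jiffies values compared with a wrap-safe signed subtraction, and tp->srtt_us stores eight times the smoothed RTT in microseconds, so srtt_us >> 4 is half the RTT. The following standalone sketch mirrors the two new checks under those assumptions; the names should_defer, now_jiffies, head_age_us and srtt_us_x8 are illustrative, not kernel identifiers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch of the reworked timing checks: defer only if the last write
 * happened within the current jiffy AND the skb at the head of the
 * write queue (oldest outstanding data) is at least half an RTT old,
 * so its ACK should arrive soon (srtt_us_x8 holds 8 * srtt, hence
 * ">> 4" is srtt / 2).
 */
static bool should_defer(uint32_t now_jiffies, uint32_t lsndtime,
			 uint32_t head_age_us, uint32_t srtt_us_x8)
{
	/* Wrap-safe "last send is in the past" test, mirroring
	 * (s32)(tcp_time_stamp - tp->lsndtime) > 0 in the patch.
	 */
	if ((int32_t)(now_jiffies - lsndtime) > 0)
		return false;		/* last write not recent: send now */

	/* Head skb sent less than srtt/2 ago: the next ACK is still far
	 * away, so deferring would only add delay.
	 */
	if (head_age_us < (srtt_us_x8 >> 4))
		return false;		/* send now */

	return true;			/* deferring looks safe */
}

int main(void)
{
	/* Example: last write this jiffy, head sent 25 ms ago, srtt 40 ms. */
	printf("defer: %d\n", should_defer(1000, 1000, 25000, 8 * 40000));
	return 0;
}

With the example values in main(), the last write is within the current jiffy and the head skb is older than half the 40 ms srtt, so deferral is allowed.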

include/linux/tcp.h  +0 -1

@@ -236,7 +236,6 @@ struct tcp_sock {
 	u32	lost_out;	/* Lost packets			*/
 	u32	sacked_out;	/* SACK'd packets		*/
 	u32	fackets_out;	/* FACK'd packets		*/
-	u32	tso_deferred;
 
 	/* from STCP, retrans queue hinting */
 	struct sk_buff* lost_skb_hint;
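
For reference, the tso_deferred field removed above packed an "armed" flag and a jiffies snapshot into one u32; after this series the same information comes from tp->lsndtime and skb timestamps (see the tcp_output.c hunks below). A standalone sketch of the old encoding, with illustrative helper names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* The removed field kept bit 0 always set so the stored value could
 * never be 0 (0 meant "no deferral pending"); the upper 31 bits held
 * the jiffies value at arming time. Illustrative helpers, not kernel
 * code.
 */
static uint32_t tso_deferred_arm(uint32_t jiffies_now)
{
	return 1u | (jiffies_now << 1);
}

/* True when more than one jiffy has passed since arming, i.e. the old
 * "defer for less than two clock ticks" cutoff.
 */
static bool tso_deferred_expired(uint32_t tso_deferred, uint32_t jiffies_now)
{
	return tso_deferred &&
	       ((jiffies_now << 1) >> 1) - (tso_deferred >> 1) > 1;
}

int main(void)
{
	uint32_t d = tso_deferred_arm(100);

	printf("%d %d\n", tso_deferred_expired(d, 101),  /* 0: still fresh */
			  tso_deferred_expired(d, 103)); /* 1: expired */
	return 0;
}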

net/ipv4/tcp_output.c  +17 -12

@@ -1752,20 +1752,23 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 				 bool *is_cwnd_limited, u32 max_segs)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
-	u32 send_win, cong_win, limit, in_flight;
+	u32 age, send_win, cong_win, limit, in_flight;
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct skb_mstamp now;
+	struct sk_buff *head;
 	int win_divisor;
 
 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
 		goto send_now;
 
-	if (icsk->icsk_ca_state != TCP_CA_Open)
+	if (!((1 << icsk->icsk_ca_state) & (TCPF_CA_Open | TCPF_CA_CWR)))
 		goto send_now;
 
-	/* Defer for less than two clock ticks. */
-	if (tp->tso_deferred &&
-	    (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1)
+	/* Avoid bursty behavior by allowing defer
+	 * only if the last write was recent.
+	 */
+	if ((s32)(tcp_time_stamp - tp->lsndtime) > 0)
 		goto send_now;
 
 	in_flight = tcp_packets_in_flight(tp);
@@ -1807,11 +1810,14 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 			goto send_now;
 	}
 
-	/* Ok, it looks like it is advisable to defer.
-	 * Do not rearm the timer if already set to not break TCP ACK clocking.
-	 */
-	if (!tp->tso_deferred)
-		tp->tso_deferred = 1 | (jiffies << 1);
+	head = tcp_write_queue_head(sk);
+	skb_mstamp_get(&now);
+	age = skb_mstamp_us_delta(&now, &head->skb_mstamp);
+	/* If next ACK is likely to come too late (half srtt), do not defer */
+	if (age < (tp->srtt_us >> 4))
+		goto send_now;
+
+	/* Ok, it looks like it is advisable to defer. */
 
 	if (cong_win < send_win && cong_win < skb->len)
 		*is_cwnd_limited = true;
@@ -1819,7 +1825,6 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 	return true;
 
 send_now:
-	tp->tso_deferred = 0;
 	return false;
 }
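
A note on the congestion-state test above: the old code allowed deferral only in TCP_CA_Open, while the new bitmask also accepts TCP_CA_CWR, the state an ECN-capable sender enters after receiving ECE marks, which appears to be the "better ECN behavior" the cover letter mentions. A standalone sketch of the (1 << state) & flags membership idiom, using illustrative enum values rather than the kernel's definitions:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative congestion-avoidance states (values are for the sketch only). */
enum ca_state { CA_OPEN, CA_DISORDER, CA_CWR, CA_RECOVERY, CA_LOSS };

#define CAF(state)	(1 << (state))	/* state -> bitmask flag */

/* Deferral is allowed only when the state is in the {Open, CWR} set:
 * "1 << state" turns the enum into a single bit, which is tested
 * against the OR of the allowed flags, in the same spirit as the
 * TCPF_CA_* test above.
 */
static bool ca_state_allows_defer(enum ca_state state)
{
	return (1 << state) & (CAF(CA_OPEN) | CAF(CA_CWR));
}

int main(void)
{
	printf("Open: %d CWR: %d Recovery: %d\n",
	       ca_state_allows_defer(CA_OPEN),
	       ca_state_allows_defer(CA_CWR),
	       ca_state_allows_defer(CA_RECOVERY));
	return 0;
}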