|
@@ -1566,6 +1566,17 @@ static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now)
|
|
|
return min_t(u32, segs, sk->sk_gso_max_segs);
|
|
|
}
|
|
|
|
|
|
+/* Return the number of segments we want in the skb we are transmitting.
|
|
|
+ * Ask the congestion control module first (its tso_segs_goal() hook);
|
|
|
+ * a result of 0, or no hook at all, falls back to tcp_tso_autosize(). */
|
|
|
+static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
|
|
|
+{
|
|
|
+	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
|
|
|
+	u32 tso_segs = ca_ops->tso_segs_goal ? ca_ops->tso_segs_goal(sk) : 0;	/* 0 = CC module declined */
|
|
|
+
|
|
|
+	return tso_segs ? : tcp_tso_autosize(sk, mss_now);	/* GNU "?:": x ?: y is x ? x : y */
|
|
|
+}
|
|
|
+
|
|
|
/* Returns the portion of skb which can be sent right away */
|
|
|
static unsigned int tcp_mss_split_point(const struct sock *sk,
|
|
|
const struct sk_buff *skb,
|
|
@@ -2061,7 +2072,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
|
|
|
}
|
|
|
}
|
|
|
|
|
|
- max_segs = tcp_tso_autosize(sk, mss_now);
|
|
|
+ max_segs = tcp_tso_segs(sk, mss_now);
|
|
|
while ((skb = tcp_send_head(sk))) {
|
|
|
unsigned int limit;
|
|
|
|
|
@@ -2778,7 +2789,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
|
|
|
last_lost = tp->snd_una;
|
|
|
}
|
|
|
|
|
|
- max_segs = tcp_tso_autosize(sk, tcp_current_mss(sk));
|
|
|
+ max_segs = tcp_tso_segs(sk, tcp_current_mss(sk));
|
|
|
tcp_for_write_queue_from(skb, sk) {
|
|
|
__u8 sacked;
|
|
|
int segs;
|