@@ -261,23 +261,26 @@ static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
 		sk->sk_pacing_rate = rate;
 }
 
-/* Return count of segments we want in the skbs we send, or 0 for default. */
-static u32 bbr_tso_segs_goal(struct sock *sk)
+/* override sysctl_tcp_min_tso_segs */
+static u32 bbr_min_tso_segs(struct sock *sk)
 {
-	struct bbr *bbr = inet_csk_ca(sk);
-
-	return bbr->tso_segs_goal;
+	return sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
 }
 
 static void bbr_set_tso_segs_goal(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bbr *bbr = inet_csk_ca(sk);
-	u32 min_segs;
+	u32 segs, bytes;
+
+	/* Sort of tcp_tso_autosize() but ignoring
+	 * driver provided sk_gso_max_size.
+	 */
+	bytes = min_t(u32, sk->sk_pacing_rate >> sk->sk_pacing_shift,
+		      GSO_MAX_SIZE - 1 - MAX_TCP_HEADER);
+	segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));
 
-	min_segs = sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
-	bbr->tso_segs_goal = min(tcp_tso_autosize(sk, tp->mss_cache, min_segs),
-				 0x7FU);
+	bbr->tso_segs_goal = min(segs, 0x7FU);
 }
 
 /* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */
@@ -936,7 +939,7 @@ static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = {
 	.undo_cwnd	= bbr_undo_cwnd,
 	.cwnd_event	= bbr_cwnd_event,
 	.ssthresh	= bbr_ssthresh,
-	.tso_segs_goal	= bbr_tso_segs_goal,
+	.min_tso_segs	= bbr_min_tso_segs,
 	.get_info	= bbr_get_info,
 	.set_state	= bbr_set_state,
 };
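
For context, the arithmetic that bbr_set_tso_segs_goal() performs after this patch can be traced in isolation. The sketch below is a stand-alone user-space mirror of that computation, not kernel code: GSO_MAX_SIZE, MAX_TCP_HEADER, bbr_min_tso_rate and the sk_pacing_shift value are hard-coded with assumed or approximate values, and the helper names are hypothetical stand-ins for the kernel functions.

/*
 * Stand-alone sketch (not part of the patch) of the segment-goal
 * arithmetic in bbr_set_tso_segs_goal()/bbr_min_tso_segs().
 * Constants below are illustrative assumptions; the kernel reads the
 * real values from the socket and its own headers.
 */
#include <stdio.h>
#include <stdint.h>

#define GSO_MAX_SIZE	65536u	/* kernel default GSO size limit */
#define MAX_TCP_HEADER	320u	/* rough value, for illustration only */

/* ~1.2 Mbit/s threshold as in tcp_bbr.c; sk_pacing_rate is bytes/sec,
 * hence the >> 3 when comparing against a bits/sec constant. */
static const unsigned int bbr_min_tso_rate = 1200000;

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }
static uint32_t max_u32(uint32_t a, uint32_t b) { return a > b ? a : b; }

/* Mirrors bbr_min_tso_segs(): 1 segment below the TSO rate floor, else 2. */
static uint32_t min_tso_segs(uint64_t pacing_rate)
{
	return pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
}

/* Mirrors the new bbr_set_tso_segs_goal() computation. */
static uint32_t tso_segs_goal(uint64_t pacing_rate, uint32_t pacing_shift,
			      uint32_t mss)
{
	/* Bytes we are willing to put in one skb: roughly 1/2^pacing_shift
	 * of a second worth of pacing, capped just under the GSO limit. */
	uint32_t bytes = min_u32(pacing_rate >> pacing_shift,
				 GSO_MAX_SIZE - 1 - MAX_TCP_HEADER);
	uint32_t segs = max_u32(bytes / mss, min_tso_segs(pacing_rate));

	return min_u32(segs, 0x7Fu);	/* same 0x7F cap as the patch */
}

int main(void)
{
	/* ~1 Gbit/s pacing rate, assumed pacing_shift of 10, MSS 1448 */
	printf("%u\n", tso_segs_goal(125000000ull, 10, 1448));
	return 0;
}

With a 1 Gbit/s pacing rate (125,000,000 bytes/sec) the per-skb byte budget of roughly one pacing interval (~122 KB) is clamped by the GSO limit to ~65 KB, so the goal comes out at 45 segments for an MSS of 1448, well under the 0x7F cap.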