@@ -1354,6 +1354,8 @@ void tcp_mtup_init(struct sock *sk)
 			       icsk->icsk_af_ops->net_header_len;
 	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
 	icsk->icsk_mtup.probe_size = 0;
+	if (icsk->icsk_mtup.enabled)
+		icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
 }
 EXPORT_SYMBOL(tcp_mtup_init);
 
@@ -1828,6 +1830,31 @@ send_now:
 	return false;
 }
 
+static inline void tcp_mtu_check_reprobe(struct sock *sk)
+{
+	struct inet_connection_sock *icsk = inet_csk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct net *net = sock_net(sk);
+	u32 interval;
+	s32 delta;
+
+	interval = net->ipv4.sysctl_tcp_probe_interval;
+	delta = tcp_time_stamp - icsk->icsk_mtup.probe_timestamp;
+	if (unlikely(delta >= interval * HZ)) {
+		int mss = tcp_current_mss(sk);
+
+		/* Update current search range */
+		icsk->icsk_mtup.probe_size = 0;
+		icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp +
+			sizeof(struct tcphdr) +
+			icsk->icsk_af_ops->net_header_len;
+		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
+
+		/* Update probe time stamp */
+		icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
+	}
+}
+
 /* Create a new MTU probe if we are ready.
  * MTU probe is regularly attempting to increase the path MTU by
  * deliberately sending larger packets. This discovers routing
@@ -1870,9 +1897,16 @@ static int tcp_mtu_probe(struct sock *sk)
 			       icsk->icsk_mtup.search_low) >> 1);
 	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
 	interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low;
+	/* When misfortune happens, i.e. we are actively reprobing and
+	 * the reprobe timer has expired, stick with the current probing
+	 * process instead of resetting the search range to its original.
+	 */
 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
-	    interval < max(1, net->ipv4.sysctl_tcp_probe_threshold)) {
-		/* TODO: set timer for probe_converge_event */
+	    interval < net->ipv4.sysctl_tcp_probe_threshold) {
+		/* Check whether enough time has elapsed for
+		 * another round of probing.
+		 */
+		tcp_mtu_check_reprobe(sk);
 		return -1;
 	}
 