@@ -576,8 +576,8 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
 void tcp_rcv_space_adjust(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	u32 copied;
 	int time;
-	int copied;
 
 	tcp_mstamp_refresh(tp);
 	time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
@@ -600,12 +600,13 @@ void tcp_rcv_space_adjust(struct sock *sk)
 
 	if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
 	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
-		int rcvwin, rcvmem, rcvbuf;
+		int rcvmem, rcvbuf;
+		u64 rcvwin;
 
 		/* minimal window to cope with packet losses, assuming
 		 * steady state. Add some cushion because of small variations.
 		 */
-		rcvwin = (copied << 1) + 16 * tp->advmss;
+		rcvwin = ((u64)copied << 1) + 16 * tp->advmss;
 
 		/* If rate increased by 25%,
 		 *	assume slow start, rcvwin = 3 * copied
@@ -625,8 +626,9 @@ void tcp_rcv_space_adjust(struct sock *sk)
 		while (tcp_win_from_space(sk, rcvmem) < tp->advmss)
 			rcvmem += 128;
 
-		rcvbuf = min(rcvwin / tp->advmss * rcvmem,
-			     sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
+		do_div(rcvwin, tp->advmss);
+		rcvbuf = min_t(u64, rcvwin * rcvmem,
+			       sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
 		if (rcvbuf > sk->sk_rcvbuf) {
 			sk->sk_rcvbuf = rcvbuf;
 
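For illustration, a minimal userspace sketch (not part of the patch, and not kernel code) of the overflow this change avoids. The values are hypothetical stand-ins: "copied" mimics bytes consumed by the receiver in one RTT, "advmss" mimics tp->advmss, and "rcvmem" a rough per-MSS memory estimate; plain 64-bit division and multiplication stand in for do_div() and min_t(u64, ...).

/* overflow_demo.c - build with: cc -o overflow_demo overflow_demo.c */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t copied = 700u << 20;	/* hypothetical: ~700 MB in one RTT */
	uint32_t advmss = 1460;		/* typical Ethernet MSS */
	uint32_t rcvmem = 2304;		/* rough per-MSS memory estimate */

	/* Old arithmetic, 32 bits wide: rcvwin itself still fits, but the
	 * final multiply exceeds INT_MAX, so the old int-typed rcvbuf went
	 * negative (signed overflow in the kernel).  Computed in uint32_t
	 * and cast here to keep the demo well defined.
	 */
	uint32_t rcvwin32 = (copied << 1) + 16 * advmss;
	int32_t old_rcvbuf = (int32_t)(rcvwin32 / advmss * rcvmem);

	/* New arithmetic: widen to 64 bits before the shift, divide first
	 * (do_div() in the kernel), multiply in 64 bits, and only then
	 * clamp against sysctl_tcp_rmem[2] via min_t(u64, ...).
	 */
	uint64_t rcvwin = ((uint64_t)copied << 1) + 16ULL * advmss;
	rcvwin /= advmss;		/* stands in for do_div() */
	uint64_t new_rcvbuf = rcvwin * rcvmem;

	printf("old 32-bit rcvbuf: %d\n", old_rcvbuf);
	printf("new 64-bit rcvbuf: %llu\n", (unsigned long long)new_rcvbuf);
	return 0;
}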