@@ -900,7 +900,8 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
 	 */
 	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
 	    !tcp_passive_fastopen(sk)) {
-		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
+		err = sk_stream_wait_connect(sk, &timeo);
+		if (err != 0)
 			goto out_err;
 	}
 
@@ -967,7 +968,8 @@ new_segment:
 
 		copied += copy;
 		offset += copy;
-		if (!(size -= copy)) {
+		size -= copy;
+		if (!size) {
 			tcp_tx_timestamp(sk, skb);
 			goto out;
 		}
@@ -988,7 +990,8 @@ wait_for_memory:
 		tcp_push(sk, flags & ~MSG_MORE, mss_now,
 			 TCP_NAGLE_PUSH, size_goal);
 
-		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
+		err = sk_stream_wait_memory(sk, &timeo);
+		if (err != 0)
 			goto do_error;
 
 		mss_now = tcp_send_mss(sk, &size_goal, flags);
@@ -1111,7 +1114,8 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 	 */
 	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
 	    !tcp_passive_fastopen(sk)) {
-		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
+		err = sk_stream_wait_connect(sk, &timeo);
+		if (err != 0)
 			goto do_error;
 	}
 
@@ -1267,7 +1271,8 @@ wait_for_memory:
 		tcp_push(sk, flags & ~MSG_MORE, mss_now,
 			 TCP_NAGLE_PUSH, size_goal);
 
-		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
+		err = sk_stream_wait_memory(sk, &timeo);
+		if (err != 0)
 			goto do_error;
 
 		mss_now = tcp_send_mss(sk, &size_goal, flags);
@@ -1767,7 +1772,8 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
 
 			/* __ Restore normal policy in scheduler __ */
 
-			if ((chunk = len - tp->ucopy.len) != 0) {
+			chunk = len - tp->ucopy.len;
+			if (chunk != 0) {
 				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
 				len -= chunk;
 				copied += chunk;
@@ -1778,7 +1784,8 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
 do_prequeue:
 				tcp_prequeue_process(sk);
 
-				if ((chunk = len - tp->ucopy.len) != 0) {
+				chunk = len - tp->ucopy.len;
+				if (chunk != 0) {
 					NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
 					len -= chunk;
 					copied += chunk;
@@ -2230,7 +2237,8 @@ int tcp_disconnect(struct sock *sk, int flags)
 	sk->sk_shutdown = 0;
 	sock_reset_flag(sk, SOCK_DONE);
 	tp->srtt_us = 0;
-	if ((tp->write_seq += tp->max_window + 2) == 0)
+	tp->write_seq += tp->max_window + 2;
+	if (tp->write_seq == 0)
 		tp->write_seq = 1;
 	icsk->icsk_backoff = 0;
 	tp->snd_cwnd = 2;
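
Every hunk above applies the same mechanical cleanup: the assignment that was buried inside an if condition is hoisted onto its own statement, and the condition then tests only the resulting value. As a minimal standalone sketch of the before/after pattern (illustrative only; err, fail and do_work() are hypothetical names, not code from the patch):

	/* Before: assignment embedded in the condition (flagged by checkpatch) */
	if ((err = do_work()) != 0)
		goto fail;

	/* After: assignment hoisted out; the condition only tests the value */
	err = do_work();
	if (err != 0)
		goto fail;

The behaviour is unchanged in each case; the rewrite only brings the code in line with the kernel coding-style preference against assignments inside conditionals ("ERROR: do not use assignment in if condition").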