@@ -426,6 +426,15 @@ void tcp_init_sock(struct sock *sk)
 }
 EXPORT_SYMBOL(tcp_init_sock);
 
+void tcp_tx_timestamp(struct sock *sk, struct sk_buff *skb)
+{
+	struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+	sock_tx_timestamp(sk, &shinfo->tx_flags);
+	if (shinfo->tx_flags & SKBTX_ANY_SW_TSTAMP)
+		shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
+}
+
 /*
  * Wait for a TCP event.
  *
@@ -523,7 +532,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	}
 	/* This barrier is coupled with smp_wmb() in tcp_reset() */
 	smp_rmb();
-	if (sk->sk_err)
+	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
 		mask |= POLLERR;
 
 	return mask;
@@ -959,8 +968,10 @@ new_segment:
 
 		copied += copy;
 		offset += copy;
-		if (!(size -= copy))
+		if (!(size -= copy)) {
+			tcp_tx_timestamp(sk, skb);
 			goto out;
+		}
 
 		if (skb->len < size_goal || (flags & MSG_OOB))
 			continue;
@@ -1252,8 +1263,10 @@ new_segment:
 
 			from += copy;
 			copied += copy;
-			if ((seglen -= copy) == 0 && iovlen == 0)
+			if ((seglen -= copy) == 0 && iovlen == 0) {
+				tcp_tx_timestamp(sk, skb);
 				goto out;
+			}
 
 			if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))
 				continue;
@@ -1617,6 +1630,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	struct sk_buff *skb;
 	u32 urg_hole = 0;
 
+	if (unlikely(flags & MSG_ERRQUEUE))
+		return ip_recv_error(sk, msg, len, addr_len);
+
 	if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
 	    (sk->sk_state == TCP_ESTABLISHED))
 		sk_busy_loop(sk, nonblock);