@@ -400,7 +400,6 @@ void tcp_init_sock(struct sock *sk)
 
 	tp->out_of_order_queue = RB_ROOT;
 	tcp_init_xmit_timers(sk);
-	tcp_prequeue_init(tp);
 	INIT_LIST_HEAD(&tp->tsq_node);
 
 	icsk->icsk_rto = TCP_TIMEOUT_INIT;
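
For reference, the tcp_prequeue_init() call deleted here initialized exactly the per-socket ucopy state that the rest of this patch removes. Quoting the helper from include/net/tcp.h of the same era from memory (treat the exact body as approximate; it is not part of this diff):

static inline void tcp_prequeue_init(struct tcp_sock *tp)
{
	/* No reader installed yet; prequeue empty, no memory charged. */
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
}

Since every field it touches belongs to the prequeue machinery, tcp_init_sock() loses the call with no replacement.
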
@@ -1525,20 +1524,6 @@ static void tcp_cleanup_rbuf(struct sock *sk, int copied)
 		tcp_send_ack(sk);
 }
 
-static void tcp_prequeue_process(struct sock *sk)
-{
-	struct sk_buff *skb;
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
-
-	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
-		sk_backlog_rcv(sk, skb);
-
-	/* Clear memory counter. */
-	tp->ucopy.memory = 0;
-}
-
 static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
 {
 	struct sk_buff *skb;
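
Note what tcp_prequeue_process() (removed above) actually did: it pushed each prequeued skb through sk_backlog_rcv(), i.e. through the socket's normal protocol receive handler (tcp_v4_do_rcv() for IPv4), exactly as release_sock() does for backlogged packets. A sketch of that inline, assuming the include/net/sock.h definition of this period:

static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	/* pfmemalloc skbs go through a wrapper that flags the task;
	 * everything else goes straight to the protocol handler.
	 */
	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
		return __sk_backlog_rcv(sk, skb);

	return sk->sk_backlog_rcv(sk, skb);
}

In other words, the prequeue never processed packets differently; it only deferred the same work from softirq context into the reading task's context.
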
@@ -1671,7 +1656,6 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
 	int err;
 	int target;		/* Read at least this many bytes */
 	long timeo;
-	struct task_struct *user_recv = NULL;
 	struct sk_buff *skb, *last;
 	u32 urg_hole = 0;
 
@@ -1806,51 +1790,6 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
 
 		tcp_cleanup_rbuf(sk, copied);
 
-		if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
-			/* Install new reader */
-			if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
-				user_recv = current;
-				tp->ucopy.task = user_recv;
-				tp->ucopy.msg = msg;
-			}
-
-			tp->ucopy.len = len;
-
-			WARN_ON(tp->copied_seq != tp->rcv_nxt &&
-				!(flags & (MSG_PEEK | MSG_TRUNC)));
-
-			/* Ugly... If prequeue is not empty, we have to
-			 * process it before releasing socket, otherwise
-			 * order will be broken at second iteration.
-			 * More elegant solution is required!!!
-			 *
-			 * Look: we have the following (pseudo)queues:
-			 *
-			 * 1. packets in flight
-			 * 2. backlog
-			 * 3. prequeue
-			 * 4. receive_queue
-			 *
-			 * Each queue can be processed only if the next ones
-			 * are empty. At this point we have empty receive_queue.
-			 * But prequeue _can_ be not empty after 2nd iteration,
-			 * when we jumped to start of loop because backlog
-			 * processing added something to receive_queue.
-			 * We cannot release_sock(), because backlog contains
-			 * packets arrived _after_ prequeued ones.
-			 *
-			 * Shortly, algorithm is clear --- to process all
-			 * the queues in order. We could make it more directly,
-			 * requeueing packets from backlog to prequeue, if
-			 * is not empty. It is more elegant, but eats cycles,
-			 * unfortunately.
-			 */
-			if (!skb_queue_empty(&tp->ucopy.prequeue))
-				goto do_prequeue;
-
-			/* __ Set realtime policy in scheduler __ */
-		}
-
 		if (copied >= target) {
 			/* Do not sleep, just process backlog. */
 			release_sock(sk);
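
The comment removed above is the clearest statement of why this code was hard to maintain: bytes for one socket can be spread across four (pseudo)queues, and the reader must drain them strictly in arrival order or the byte stream is reordered. The toy program below (plain userspace C, no kernel APIs; all names invented for illustration) models that invariant: sequence numbers come out monotonically only when receive_queue drains before prequeue and prequeue before backlog.

#include <assert.h>
#include <stdio.h>

/* Toy stand-ins for the kernel's per-socket queues.  Earlier-arriving
 * data sits closer to receive_queue; later data sits in the backlog.
 */
static const int receive_queue[] = { 1, 2, 3 };	/* oldest bytes */
static const int prequeue[]      = { 4, 5 };	/* arrived next */
static const int backlog[]       = { 6, 7, 8 };	/* newest bytes */

static int expected = 1;

static void drain(const char *name, const int *q, int n)
{
	for (int i = 0; i < n; i++) {
		/* The invariant from the removed comment: a queue may be
		 * drained only after every later-arriving queue's data has
		 * its turn, so sequence numbers must appear in order.
		 */
		assert(q[i] == expected);
		printf("%s: seq %d\n", name, q[i]);
		expected++;
	}
}

int main(void)
{
	drain("receive_queue", receive_queue, 3);	/* queue 4 first */
	drain("prequeue",      prequeue,      2);	/* then queue 3 */
	drain("backlog",       backlog,       3);	/* then queue 2 */
	return 0;	/* queue 1, packets in flight, lands later */
}

Drain them in any other order and the assert fires immediately, which is exactly the reordering hazard that forced the goto do_prequeue dance in tcp_recvmsg().
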
@@ -1859,31 +1798,6 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
 			sk_wait_data(sk, &timeo, last);
 		}
 
-		if (user_recv) {
-			int chunk;
-
-			/* __ Restore normal policy in scheduler __ */
-
-			chunk = len - tp->ucopy.len;
-			if (chunk != 0) {
-				NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
-				len -= chunk;
-				copied += chunk;
-			}
-
-			if (tp->rcv_nxt == tp->copied_seq &&
-			    !skb_queue_empty(&tp->ucopy.prequeue)) {
-do_prequeue:
-				tcp_prequeue_process(sk);
-
-				chunk = len - tp->ucopy.len;
-				if (chunk != 0) {
-					NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
-					len -= chunk;
-					copied += chunk;
-				}
-			}
-		}
 		if ((flags & MSG_PEEK) &&
 		    (peek_seq - copied - urg_hole != tp->copied_seq)) {
 			net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n",
@@ -1955,25 +1869,6 @@ skip_copy:
 		break;
 	} while (len > 0);
 
-	if (user_recv) {
-		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
-			int chunk;
-
-			tp->ucopy.len = copied > 0 ? len : 0;
-
-			tcp_prequeue_process(sk);
-
-			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
-				NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
-				len -= chunk;
-				copied += chunk;
-			}
-		}
-
-		tp->ucopy.task = NULL;
-		tp->ucopy.len = 0;
-	}
-
 	/* According to UNIX98, msg_name/msg_namelen are ignored
 	 * on connected socket. I was just happy when found this 8) --ANK
 	 */
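
One user-visible consequence of the hunks above: the MIB counters referenced in the removed code (LINUX_MIB_TCPPREQUEUED, LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE) can no longer increment. They are exported through /proc/net/netstat on the TcpExt lines as TCPPrequeued, TCPDirectCopyFromBacklog, and TCPDirectCopyFromPrequeue. Below is a minimal sketch for inspecting them before and after the patch, assuming only the standard /proc/net/netstat layout:

#include <stdio.h>
#include <string.h>

/* Print the TcpExt header/value lines from /proc/net/netstat.  If the
 * running kernel still exposes the prequeue counters, their names
 * appear on the header line with values in the same column of the
 * following line.
 */
int main(void)
{
	char line[4096];
	FILE *f = fopen("/proc/net/netstat", "r");

	if (!f) {
		perror("/proc/net/netstat");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (strncmp(line, "TcpExt:", 7) == 0)
			fputs(line, stdout);
	fclose(f);
	return 0;
}

Whether the enum entries themselves disappear from the uapi header in this same patch or in a follow-up is not shown by the hunks above, so on patched kernels the counters may simply read as frozen zeros.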