@@ -360,8 +360,10 @@ static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned i
 		int flags = XS_SENDMSG_FLAGS;
 
 		remainder -= len;
-		if (remainder != 0 || more)
+		if (more)
 			flags |= MSG_MORE;
+		if (remainder != 0)
+			flags |= MSG_SENDPAGE_NOTLAST | MSG_MORE;
 		err = do_sendpage(sock, *ppage, base, len, flags);
 		if (remainder == 0 || err != len)
 			break;
@@ -823,6 +825,7 @@ static void xs_reset_transport(struct sock_xprt *transport)
 
 	kernel_sock_shutdown(sock, SHUT_RDWR);
 
+	mutex_lock(&transport->recv_mutex);
 	write_lock_bh(&sk->sk_callback_lock);
 	transport->inet = NULL;
 	transport->sock = NULL;
@@ -833,6 +836,7 @@ static void xs_reset_transport(struct sock_xprt *transport)
 	xprt_clear_connected(xprt);
 	write_unlock_bh(&sk->sk_callback_lock);
 	xs_sock_reset_connection_flags(xprt);
+	mutex_unlock(&transport->recv_mutex);
 
 	trace_rpc_socket_close(xprt, sock);
 	sock_release(sock);
@@ -886,6 +890,7 @@ static void xs_destroy(struct rpc_xprt *xprt)
 
 	cancel_delayed_work_sync(&transport->connect_worker);
 	xs_close(xprt);
+	cancel_work_sync(&transport->recv_worker);
 	xs_xprt_free(xprt);
 	module_put(THIS_MODULE);
 }
@@ -906,44 +911,36 @@ static int xs_local_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
 }
 
 /**
- * xs_local_data_ready - "data ready" callback for AF_LOCAL sockets
- * @sk: socket with data to read
+ * xs_local_data_read_skb
+ * @xprt: transport
+ * @sk: socket
+ * @skb: skbuff
  *
  * Currently this assumes we can read the whole reply in a single gulp.
  */
-static void xs_local_data_ready(struct sock *sk)
+static void xs_local_data_read_skb(struct rpc_xprt *xprt,
+		struct sock *sk,
+		struct sk_buff *skb)
 {
 	struct rpc_task *task;
-	struct rpc_xprt *xprt;
 	struct rpc_rqst *rovr;
-	struct sk_buff *skb;
-	int err, repsize, copied;
+	int repsize, copied;
 	u32 _xid;
 	__be32 *xp;
 
-	read_lock_bh(&sk->sk_callback_lock);
-	dprintk("RPC: %s...\n", __func__);
-	xprt = xprt_from_sock(sk);
-	if (xprt == NULL)
-		goto out;
-
-	skb = skb_recv_datagram(sk, 0, 1, &err);
-	if (skb == NULL)
-		goto out;
-
 	repsize = skb->len - sizeof(rpc_fraghdr);
 	if (repsize < 4) {
 		dprintk("RPC: impossible RPC reply size %d\n", repsize);
-		goto dropit;
+		return;
 	}
 
 	/* Copy the XID from the skb... */
 	xp = skb_header_pointer(skb, sizeof(rpc_fraghdr), sizeof(_xid), &_xid);
 	if (xp == NULL)
-		goto dropit;
+		return;
 
 	/* Look up and lock the request corresponding to the given XID */
-	spin_lock(&xprt->transport_lock);
+	spin_lock_bh(&xprt->transport_lock);
 	rovr = xprt_lookup_rqst(xprt, *xp);
 	if (!rovr)
 		goto out_unlock;
@@ -961,50 +958,68 @@ static void xs_local_data_ready(struct sock *sk)
 	xprt_complete_rqst(task, copied);
 
  out_unlock:
-	spin_unlock(&xprt->transport_lock);
- dropit:
-	skb_free_datagram(sk, skb);
- out:
-	read_unlock_bh(&sk->sk_callback_lock);
+	spin_unlock_bh(&xprt->transport_lock);
+}
+
+static void xs_local_data_receive(struct sock_xprt *transport)
+{
+	struct sk_buff *skb;
+	struct sock *sk;
+	int err;
+
+	mutex_lock(&transport->recv_mutex);
+	sk = transport->inet;
+	if (sk == NULL)
+		goto out;
+	for (;;) {
+		skb = skb_recv_datagram(sk, 0, 1, &err);
+		if (skb == NULL)
+			break;
+		xs_local_data_read_skb(&transport->xprt, sk, skb);
+		skb_free_datagram(sk, skb);
+	}
+out:
+	mutex_unlock(&transport->recv_mutex);
+}
+
+static void xs_local_data_receive_workfn(struct work_struct *work)
+{
+	struct sock_xprt *transport =
+		container_of(work, struct sock_xprt, recv_worker);
+	xs_local_data_receive(transport);
 }
 
 /**
- * xs_udp_data_ready - "data ready" callback for UDP sockets
- * @sk: socket with data to read
+ * xs_udp_data_read_skb - receive callback for UDP sockets
+ * @xprt: transport
+ * @sk: socket
+ * @skb: skbuff
  *
  */
-static void xs_udp_data_ready(struct sock *sk)
+static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
+		struct sock *sk,
+		struct sk_buff *skb)
 {
 	struct rpc_task *task;
-	struct rpc_xprt *xprt;
 	struct rpc_rqst *rovr;
-	struct sk_buff *skb;
-	int err, repsize, copied;
+	int repsize, copied;
 	u32 _xid;
 	__be32 *xp;
 
-	read_lock_bh(&sk->sk_callback_lock);
-	dprintk("RPC: xs_udp_data_ready...\n");
-	if (!(xprt = xprt_from_sock(sk)))
-		goto out;
-
-	if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)
-		goto out;
-
 	repsize = skb->len - sizeof(struct udphdr);
 	if (repsize < 4) {
 		dprintk("RPC: impossible RPC reply size %d!\n", repsize);
-		goto dropit;
+		return;
 	}
 
 	/* Copy the XID from the skb... */
 	xp = skb_header_pointer(skb, sizeof(struct udphdr),
 				sizeof(_xid), &_xid);
 	if (xp == NULL)
-		goto dropit;
+		return;
 
 	/* Look up and lock the request corresponding to the given XID */
-	spin_lock(&xprt->transport_lock);
+	spin_lock_bh(&xprt->transport_lock);
 	rovr = xprt_lookup_rqst(xprt, *xp);
 	if (!rovr)
 		goto out_unlock;
@@ -1025,10 +1040,54 @@ static void xs_udp_data_ready(struct sock *sk)
 	xprt_complete_rqst(task, copied);
 
  out_unlock:
-	spin_unlock(&xprt->transport_lock);
- dropit:
-	skb_free_datagram(sk, skb);
- out:
+	spin_unlock_bh(&xprt->transport_lock);
+}
+
+static void xs_udp_data_receive(struct sock_xprt *transport)
+{
+	struct sk_buff *skb;
+	struct sock *sk;
+	int err;
+
+	mutex_lock(&transport->recv_mutex);
+	sk = transport->inet;
+	if (sk == NULL)
+		goto out;
+	for (;;) {
+		skb = skb_recv_datagram(sk, 0, 1, &err);
+		if (skb == NULL)
+			break;
+		xs_udp_data_read_skb(&transport->xprt, sk, skb);
+		skb_free_datagram(sk, skb);
+	}
+out:
+	mutex_unlock(&transport->recv_mutex);
+}
+
+static void xs_udp_data_receive_workfn(struct work_struct *work)
+{
+	struct sock_xprt *transport =
+		container_of(work, struct sock_xprt, recv_worker);
+	xs_udp_data_receive(transport);
+}
+
+/**
+ * xs_data_ready - "data ready" callback for UDP sockets
+ * @sk: socket with data to read
+ *
+ */
+static void xs_data_ready(struct sock *sk)
+{
+	struct rpc_xprt *xprt;
+
+	read_lock_bh(&sk->sk_callback_lock);
+	dprintk("RPC: xs_data_ready...\n");
+	xprt = xprt_from_sock(sk);
+	if (xprt != NULL) {
+		struct sock_xprt *transport = container_of(xprt,
+				struct sock_xprt, xprt);
+		queue_work(rpciod_workqueue, &transport->recv_worker);
+	}
 	read_unlock_bh(&sk->sk_callback_lock);
 }
 
@@ -1243,12 +1302,12 @@ static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
 	dprintk("RPC: read reply XID %08x\n", ntohl(transport->tcp_xid));
 
 	/* Find and lock the request corresponding to this xid */
-	spin_lock(&xprt->transport_lock);
+	spin_lock_bh(&xprt->transport_lock);
 	req = xprt_lookup_rqst(xprt, transport->tcp_xid);
 	if (!req) {
 		dprintk("RPC: XID %08x request not found!\n",
 				ntohl(transport->tcp_xid));
-		spin_unlock(&xprt->transport_lock);
+		spin_unlock_bh(&xprt->transport_lock);
 		return -1;
 	}
 
@@ -1257,7 +1316,7 @@ static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
 	if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
 		xprt_complete_rqst(req->rq_task, transport->tcp_copied);
 
-	spin_unlock(&xprt->transport_lock);
+	spin_unlock_bh(&xprt->transport_lock);
 	return 0;
 }
 
@@ -1277,10 +1336,10 @@ static int xs_tcp_read_callback(struct rpc_xprt *xprt,
 	struct rpc_rqst *req;
 
 	/* Look up and lock the request corresponding to the given XID */
-	spin_lock(&xprt->transport_lock);
+	spin_lock_bh(&xprt->transport_lock);
 	req = xprt_lookup_bc_request(xprt, transport->tcp_xid);
 	if (req == NULL) {
-		spin_unlock(&xprt->transport_lock);
+		spin_unlock_bh(&xprt->transport_lock);
 		printk(KERN_WARNING "Callback slot table overflowed\n");
 		xprt_force_disconnect(xprt);
 		return -1;
@@ -1291,7 +1350,7 @@ static int xs_tcp_read_callback(struct rpc_xprt *xprt,
 
 	if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
 		xprt_complete_bc_request(req, transport->tcp_copied);
-	spin_unlock(&xprt->transport_lock);
+	spin_unlock_bh(&xprt->transport_lock);
 
 	return 0;
 }
@@ -1391,6 +1450,44 @@ static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, uns
 	return len - desc.count;
 }
 
+static void xs_tcp_data_receive(struct sock_xprt *transport)
+{
+	struct rpc_xprt *xprt = &transport->xprt;
+	struct sock *sk;
+	read_descriptor_t rd_desc = {
+		.count = 2*1024*1024,
+		.arg.data = xprt,
+	};
+	unsigned long total = 0;
+	int read = 0;
+
+	mutex_lock(&transport->recv_mutex);
+	sk = transport->inet;
+	if (sk == NULL)
+		goto out;
+
+	/* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
+	for (;;) {
+		lock_sock(sk);
+		read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
+		release_sock(sk);
+		if (read <= 0)
+			break;
+		total += read;
+		rd_desc.count = 65536;
+	}
+out:
+	mutex_unlock(&transport->recv_mutex);
+	trace_xs_tcp_data_ready(xprt, read, total);
+}
+
+static void xs_tcp_data_receive_workfn(struct work_struct *work)
+{
+	struct sock_xprt *transport =
+		container_of(work, struct sock_xprt, recv_worker);
+	xs_tcp_data_receive(transport);
+}
+
 /**
  * xs_tcp_data_ready - "data ready" callback for TCP sockets
 * @sk: socket with data to read
@@ -1398,34 +1495,24 @@ static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, uns
  */
 static void xs_tcp_data_ready(struct sock *sk)
 {
+	struct sock_xprt *transport;
 	struct rpc_xprt *xprt;
-	read_descriptor_t rd_desc;
-	int read;
-	unsigned long total = 0;
 
 	dprintk("RPC: xs_tcp_data_ready...\n");
 
 	read_lock_bh(&sk->sk_callback_lock);
-	if (!(xprt = xprt_from_sock(sk))) {
-		read = 0;
+	if (!(xprt = xprt_from_sock(sk)))
 		goto out;
-	}
+	transport = container_of(xprt, struct sock_xprt, xprt);
+
 	/* Any data means we had a useful conversation, so
 	 * the we don't need to delay the next reconnect
 	 */
 	if (xprt->reestablish_timeout)
 		xprt->reestablish_timeout = 0;
+	queue_work(rpciod_workqueue, &transport->recv_worker);
 
-	/* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
-	rd_desc.arg.data = xprt;
-	do {
-		rd_desc.count = 65536;
-		read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
-		if (read > 0)
-			total += read;
-	} while (read > 0);
 out:
-	trace_xs_tcp_data_ready(xprt, read, total);
 	read_unlock_bh(&sk->sk_callback_lock);
 }
 
@@ -1873,7 +1960,7 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt,
 		xs_save_old_callbacks(transport, sk);
 
 		sk->sk_user_data = xprt;
-		sk->sk_data_ready = xs_local_data_ready;
+		sk->sk_data_ready = xs_data_ready;
 		sk->sk_write_space = xs_udp_write_space;
 		sk->sk_error_report = xs_error_report;
 		sk->sk_allocation = GFP_NOIO;
@@ -2059,7 +2146,7 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
 		xs_save_old_callbacks(transport, sk);
 
 		sk->sk_user_data = xprt;
-		sk->sk_data_ready = xs_udp_data_ready;
+		sk->sk_data_ready = xs_data_ready;
 		sk->sk_write_space = xs_udp_write_space;
 		sk->sk_allocation = GFP_NOIO;
 
@@ -2650,6 +2737,7 @@ static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
 	}
 
 	new = container_of(xprt, struct sock_xprt, xprt);
+	mutex_init(&new->recv_mutex);
 	memcpy(&xprt->addr, args->dstaddr, args->addrlen);
 	xprt->addrlen = args->addrlen;
 	if (args->srcaddr)
@@ -2703,6 +2791,7 @@ static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
 	xprt->ops = &xs_local_ops;
 	xprt->timeout = &xs_local_default_timeout;
 
+	INIT_WORK(&transport->recv_worker, xs_local_data_receive_workfn);
 	INIT_DELAYED_WORK(&transport->connect_worker,
 			xs_dummy_setup_socket);
 
@@ -2774,21 +2863,20 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
 
 	xprt->timeout = &xs_udp_default_timeout;
 
+	INIT_WORK(&transport->recv_worker, xs_udp_data_receive_workfn);
+	INIT_DELAYED_WORK(&transport->connect_worker, xs_udp_setup_socket);
+
 	switch (addr->sa_family) {
 	case AF_INET:
 		if (((struct sockaddr_in *)addr)->sin_port != htons(0))
 			xprt_set_bound(xprt);
 
-		INIT_DELAYED_WORK(&transport->connect_worker,
-					xs_udp_setup_socket);
 		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
 		break;
 	case AF_INET6:
 		if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
 			xprt_set_bound(xprt);
 
-		INIT_DELAYED_WORK(&transport->connect_worker,
-					xs_udp_setup_socket);
 		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
 		break;
 	default:
@@ -2853,21 +2941,20 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
 	xprt->ops = &xs_tcp_ops;
 	xprt->timeout = &xs_tcp_default_timeout;
 
+	INIT_WORK(&transport->recv_worker, xs_tcp_data_receive_workfn);
+	INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket);
+
 	switch (addr->sa_family) {
 	case AF_INET:
 		if (((struct sockaddr_in *)addr)->sin_port != htons(0))
 			xprt_set_bound(xprt);
 
-		INIT_DELAYED_WORK(&transport->connect_worker,
-					xs_tcp_setup_socket);
 		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
 		break;
 	case AF_INET6:
 		if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
 			xprt_set_bound(xprt);
 
-		INIT_DELAYED_WORK(&transport->connect_worker,
-					xs_tcp_setup_socket);
 		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
 		break;
 	default: