@@ -212,39 +212,23 @@ out:
 }
 EXPORT_SYMBOL_GPL(xprt_destroy_backchannel);
 
-/*
- * One or more rpc_rqst structure have been preallocated during the
- * backchannel setup. Buffer space for the send and private XDR buffers
- * has been preallocated as well. Use xprt_alloc_bc_request to allocate
- * to this request. Use xprt_free_bc_request to return it.
- *
- * We know that we're called in soft interrupt context, grab the spin_lock
- * since there is no need to grab the bottom half spin_lock.
- *
- * Return an available rpc_rqst, otherwise NULL if non are available.
- */
-struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt)
+static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
 {
-	struct rpc_rqst *req;
+	struct rpc_rqst *req = NULL;
 
 	dprintk("RPC: allocate a backchannel request\n");
-	spin_lock(&xprt->bc_pa_lock);
-	if (!list_empty(&xprt->bc_pa_list)) {
-		req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
-				rq_bc_pa_list);
-		list_del(&req->rq_bc_pa_list);
-	} else {
-		req = NULL;
-	}
-	spin_unlock(&xprt->bc_pa_lock);
+	if (list_empty(&xprt->bc_pa_list))
+		goto not_found;
 
-	if (req != NULL) {
-		set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
-		req->rq_reply_bytes_recvd = 0;
-		req->rq_bytes_sent = 0;
-		memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
+	req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
+				rq_bc_pa_list);
+	req->rq_reply_bytes_recvd = 0;
+	req->rq_bytes_sent = 0;
+	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
 			sizeof(req->rq_private_buf));
-	}
+	req->rq_xid = xid;
+	req->rq_connect_cookie = xprt->connect_cookie;
+not_found:
 	dprintk("RPC: backchannel req=%p\n", req);
 	return req;
 }
@@ -259,6 +243,7 @@ void xprt_free_bc_request(struct rpc_rqst *req)
 
 	dprintk("RPC: free backchannel req=%p\n", req);
 
+	req->rq_connect_cookie = xprt->connect_cookie - 1;
 	smp_mb__before_clear_bit();
 	WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
 	clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
@@ -281,7 +266,57 @@ void xprt_free_bc_request(struct rpc_rqst *req)
 	 * may be reused by a new callback request.
 	 */
 	spin_lock_bh(&xprt->bc_pa_lock);
-	list_add(&req->rq_bc_pa_list, &xprt->bc_pa_list);
+	list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
 	spin_unlock_bh(&xprt->bc_pa_lock);
 }
 
+/*
+ * One or more rpc_rqst structures have been preallocated during the
+ * backchannel setup. Buffer space for the send and private XDR buffers
+ * has been preallocated as well. Use xprt_lookup_bc_request to obtain
+ * one of these requests. Use xprt_free_bc_request to return it.
+ *
+ * We know that we're called in soft interrupt context, so grab the plain
+ * spin_lock, since there is no need to grab the bottom half spin_lock.
+ *
+ * Return an available rpc_rqst, or NULL if none are available.
+ */
+struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
+{
+	struct rpc_rqst *req;
+
+	spin_lock(&xprt->bc_pa_lock);
+	list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
+		if (req->rq_connect_cookie != xprt->connect_cookie)
+			continue;
+		if (req->rq_xid == xid)
+			goto found;
+	}
+	req = xprt_alloc_bc_request(xprt, xid);
+found:
+	spin_unlock(&xprt->bc_pa_lock);
+	return req;
+}
+
+/*
+ * Add the callback request to the callback list. The callback
+ * service sleeps on the sv_cb_waitq waiting for new
+ * requests. Wake it up after enqueuing the
+ * request.
+ */
+void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
+{
+	struct rpc_xprt *xprt = req->rq_xprt;
+	struct svc_serv *bc_serv = xprt->bc_serv;
+
+	req->rq_private_buf.len = copied;
+	set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
+
+	dprintk("RPC: add callback request to list\n");
+	spin_lock(&bc_serv->sv_cb_lock);
+	list_del(&req->rq_bc_pa_list);
+	list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
+	wake_up(&bc_serv->sv_cb_waitq);
+	spin_unlock(&bc_serv->sv_cb_lock);
+}
+
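
For context, here is a rough sketch of how a transport receive path might consume the two helpers added above once a backchannel call has been read off the wire. The handle_bc_reply() wrapper, its return values, and the way xid and copied are obtained are assumptions made for illustration; the patch itself only adds xprt_lookup_bc_request() and xprt_complete_bc_request().

/*
 * Illustrative sketch only (not from this patch): hand a received
 * backchannel call over to the callback service.
 */
static int handle_bc_reply(struct rpc_xprt *xprt, __be32 xid, uint32_t copied)
{
	struct rpc_rqst *req;

	/* Find the preallocated rpc_rqst matching this xid, or claim a free one */
	req = xprt_lookup_bc_request(xprt, xid);
	if (req == NULL)
		return -EAGAIN;	/* no backchannel slot available */

	/*
	 * Assumes the caller has already copied the RPC call data into
	 * req->rq_rcv_buf. Completing the request unlinks it from
	 * xprt->bc_pa_list, puts it on the svc_serv's sv_cb_list and
	 * wakes the callback service waiting on sv_cb_waitq.
	 */
	xprt_complete_bc_request(req, copied);
	return 0;
}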