@@ -1293,6 +1293,20 @@ out_badheader:
 	goto out;
 }
 
+void rpcrdma_release_rqst(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
+{
+	/* Invalidate and unmap the data payloads before waking
+	 * the waiting application. This guarantees the memory
+	 * regions are properly fenced from the server before the
+	 * application accesses the data. It also ensures proper
+	 * send flow control: waking the next RPC waits until this
+	 * RPC has relinquished all its Send Queue entries.
+	 */
+	if (!list_empty(&req->rl_registered))
+		r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt,
+						    &req->rl_registered);
+}
+
 /* Reply handling runs in the poll worker thread. Anything that
  * might wait is deferred to a separate workqueue.
  */
@@ -1301,18 +1315,9 @@ void rpcrdma_deferred_completion(struct work_struct *work)
 	struct rpcrdma_rep *rep =
 		container_of(work, struct rpcrdma_rep, rr_work);
 	struct rpcrdma_req *req = rpcr_to_rdmar(rep->rr_rqst);
-	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
 
-	/* Invalidate and unmap the data payloads before waking
-	 * the waiting application. This guarantees the memory
-	 * regions are properly fenced from the server before the
-	 * application accesses the data. It also ensures proper
-	 * send flow control: waking the next RPC waits until this
-	 * RPC has relinquished all its Send Queue entries.
-	 */
 	rpcrdma_mark_remote_invalidation(&req->rl_registered, rep);
-	r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, &req->rl_registered);
-
+	rpcrdma_release_rqst(rep->rr_rxprt, req);
 	rpcrdma_complete_rqst(rep);
 }
 
@@ -1374,6 +1379,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
 	req = rpcr_to_rdmar(rqst);
 	req->rl_reply = rep;
 	rep->rr_rqst = rqst;
+	clear_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags);
 
 	dprintk("RPC: %s: reply %p completes request %p (xid 0x%08x)\n",
 		__func__, rep, req, be32_to_cpu(rep->rr_xid));