@@ -169,40 +169,41 @@ static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
 	return rqst->rq_rcv_buf.buflen <= ia->ri_max_inline_read;
 }

-/* Split "vec" on page boundaries into segments. FMR registers pages,
- * not a byte range. Other modes coalesce these segments into a single
- * MR when they can.
+/* Split @vec on page boundaries into SGEs. FMR registers pages, not
+ * a byte range. Other modes coalesce these SGEs into a single MR
+ * when they can.
+ *
+ * Returns pointer to next available SGE, and bumps the total number
+ * of SGEs consumed.
  */
-static int
-rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg, int n)
+static struct rpcrdma_mr_seg *
+rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
+		     unsigned int *n)
 {
-	size_t page_offset;
-	u32 remaining;
+	u32 remaining, page_offset;
 	char *base;

 	base = vec->iov_base;
 	page_offset = offset_in_page(base);
 	remaining = vec->iov_len;
-	while (remaining && n < RPCRDMA_MAX_SEGS) {
-		seg[n].mr_page = NULL;
-		seg[n].mr_offset = base;
-		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
-		remaining -= seg[n].mr_len;
-		base += seg[n].mr_len;
-		++n;
+	while (remaining) {
+		seg->mr_page = NULL;
+		seg->mr_offset = base;
+		seg->mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
+		remaining -= seg->mr_len;
+		base += seg->mr_len;
+		++seg;
+		++(*n);
 		page_offset = 0;
 	}
-	return n;
+	return seg;
 }
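The rewritten loop walks @vec one page at a time: the first SGE absorbs the initial offset into its page, and every later SGE starts page-aligned. Below is a standalone sketch of that split in ordinary userspace C; the 4 KiB page size and "struct seg" are stand-ins for the kernel's PAGE_SIZE and struct rpcrdma_mr_seg, not the real types.

    /* Sketch of the page-boundary split performed above. */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096ul

    struct seg { uintptr_t addr; uint32_t len; };

    static unsigned int split(uintptr_t base, uint32_t remaining,
                              struct seg *seg)
    {
            unsigned int n = 0;
            uint32_t page_offset = base & (PAGE_SIZE - 1);

            while (remaining) {
                    uint32_t len = PAGE_SIZE - page_offset;

                    if (len > remaining)
                            len = remaining;
                    seg[n].addr = base;
                    seg[n].len = len;
                    remaining -= len;
                    base += len;
                    page_offset = 0;  /* only the first SGE is unaligned */
                    n++;
            }
            return n;
    }

    int main(void)
    {
            struct seg segs[8];
            /* 9000 bytes starting 100 bytes into a page */
            unsigned int n = split(0x10064, 9000, segs);

            for (unsigned int i = 0; i < n; i++)
                    printf("SGE %u: addr 0x%lx len %u\n", i,
                           (unsigned long)segs[i].addr, segs[i].len);
            return 0;
    }

Run on this example input, it yields three SGEs of 3996, 4096, and 908 bytes, which is why a caller must budget more than len / PAGE_SIZE entries for an unaligned buffer.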
-/*
- * Chunk assembly from upper layer xdr_buf.
- *
- * Prepare the passed-in xdr_buf into representation as RPC/RDMA chunk
- * elements. Segments are then coalesced when registered, if possible
- * within the selected memreg mode.
+/* Convert @xdrbuf into SGEs no larger than a page each. As they
+ * are registered, these SGEs are then coalesced into RDMA segments
+ * when the selected memreg mode supports it.
  *
- * Returns positive number of segments converted, or a negative errno.
+ * Returns positive number of SGEs consumed, or a negative errno.
  */

 static int
@@ -210,47 +211,41 @@ rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
 		     unsigned int pos, enum rpcrdma_chunktype type,
 		     struct rpcrdma_mr_seg *seg)
 {
-	int len, n, p, page_base;
+	unsigned long page_base;
+	unsigned int len, n;
 	struct page **ppages;

 	n = 0;
-	if (pos == 0) {
-		n = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, n);
-		if (n == RPCRDMA_MAX_SEGS)
-			goto out_overflow;
-	}
+	if (pos == 0)
+		seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n);

 	len = xdrbuf->page_len;
 	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
 	page_base = offset_in_page(xdrbuf->page_base);
-	p = 0;
-	while (len && n < RPCRDMA_MAX_SEGS) {
-		if (!ppages[p]) {
-			/* alloc the pagelist for receiving buffer */
-			ppages[p] = alloc_page(GFP_ATOMIC);
-			if (!ppages[p])
+	while (len) {
+		if (unlikely(!*ppages)) {
+			/* XXX: Certain upper layer operations do
+			 * not provide receive buffer pages.
+			 */
+			*ppages = alloc_page(GFP_ATOMIC);
+			if (!*ppages)
 				return -EAGAIN;
 		}
-		seg[n].mr_page = ppages[p];
-		seg[n].mr_offset = (void *)(unsigned long) page_base;
-		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
-		if (seg[n].mr_len > PAGE_SIZE)
-			goto out_overflow;
-		len -= seg[n].mr_len;
+		seg->mr_page = *ppages;
+		seg->mr_offset = (char *)page_base;
+		seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
+		len -= seg->mr_len;
+		++ppages;
+		++seg;
 		++n;
-		++p;
-		page_base = 0;	/* page offset only applies to first page */
+		page_base = 0;
 	}

-	/* Message overflows the seg array */
-	if (len && n == RPCRDMA_MAX_SEGS)
-		goto out_overflow;
-
 	/* When encoding a Read chunk, the tail iovec contains an
 	 * XDR pad and may be omitted.
 	 */
 	if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
-		return n;
+		goto out;

 	/* When encoding a Write chunk, some servers need to see an
 	 * extra segment for non-XDR-aligned Write chunks. The upper
@@ -258,30 +253,81 @@ rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
 	 * for this purpose.
 	 */
 	if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
-		return n;
+		goto out;

-	if (xdrbuf->tail[0].iov_len) {
-		n = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, n);
-		if (n == RPCRDMA_MAX_SEGS)
-			goto out_overflow;
-	}
+	if (xdrbuf->tail[0].iov_len)
+		seg = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n);

+out:
+	if (unlikely(n > RPCRDMA_MAX_SEGS))
+		return -EIO;
 	return n;
+}

-out_overflow:
-	pr_err("rpcrdma: segment array overflow\n");
-	return -EIO;
+static inline int
+encode_item_present(struct xdr_stream *xdr)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, sizeof(*p));
+	if (unlikely(!p))
+		return -EMSGSIZE;
+
+	*p = xdr_one;
+	return 0;
 }

-static inline __be32 *
+static inline int
+encode_item_not_present(struct xdr_stream *xdr)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, sizeof(*p));
+	if (unlikely(!p))
+		return -EMSGSIZE;
+
+	*p = xdr_zero;
+	return 0;
+}
+
+static void
 xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mw *mw)
 {
 	*iptr++ = cpu_to_be32(mw->mw_handle);
 	*iptr++ = cpu_to_be32(mw->mw_length);
-	return xdr_encode_hyper(iptr, mw->mw_offset);
+	xdr_encode_hyper(iptr, mw->mw_offset);
 }

-/* XDR-encode the Read list. Supports encoding a list of read
+static int
+encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mw *mw)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
+	if (unlikely(!p))
+		return -EMSGSIZE;
+
+	xdr_encode_rdma_segment(p, mw);
+	return 0;
+}
+
+static int
+encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mw *mw,
+		    u32 position)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, 6 * sizeof(*p));
+	if (unlikely(!p))
+		return -EMSGSIZE;
+
+	*p++ = xdr_one;			/* Item present */
+	*p++ = cpu_to_be32(position);
+	xdr_encode_rdma_segment(p, mw);
+	return 0;
+}
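Each helper above reserves a fixed number of XDR quads with xdr_reserve_space() and fails with -EMSGSIZE once the header buffer is exhausted, so callers no longer carry a raw output pointer. The wire cost is easy to tally: a discriminator is one quad, an HLOO segment four, so a full read segment is six quads (24 bytes). A userspace sketch of that layout, with invented handle, length, and offset values:

    /* On-the-wire form built by encode_read_segment(): six XDR
     * quads. All values below are illustrative only. */
    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
            uint32_t quads[6];
            uint64_t offset = 0x1122334455667788ull;

            quads[0] = htonl(1);            /* item present */
            quads[1] = htonl(0);            /* position in the RPC message */
            quads[2] = htonl(0xdeadbeef);   /* H: steering tag */
            quads[3] = htonl(8192);         /* L: segment length */
            quads[4] = htonl(offset >> 32);         /* OO: high quad */
            quads[5] = htonl((uint32_t)offset);     /* OO: low quad */

            for (int i = 0; i < 6; i++)
                    printf("%08x\n", ntohl(quads[i]));
            return 0;
    }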
+
+/* Register and XDR encode the Read list. Supports encoding a list of read
  * segments that belong to a single read chunk.
  *
  * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
@@ -290,23 +336,20 @@ xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mw *mw)
  *  N elements, position P (same P for all chunks of same arg!):
  *  1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
  *
- * Returns a pointer to the XDR word in the RDMA header following
- * the end of the Read list, or an error pointer.
+ * Returns zero on success, or a negative errno if a failure occurred.
+ * @xdr is advanced to the next position in the stream.
+ *
+ * Only a single @pos value is currently supported.
  */
-static __be32 *
-rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
-			 struct rpcrdma_req *req, struct rpc_rqst *rqst,
-			 __be32 *iptr, enum rpcrdma_chunktype rtype)
+static noinline int
+rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
+			 struct rpc_rqst *rqst, enum rpcrdma_chunktype rtype)
 {
+	struct xdr_stream *xdr = &req->rl_stream;
 	struct rpcrdma_mr_seg *seg;
 	struct rpcrdma_mw *mw;
 	unsigned int pos;
-	int n, nsegs;
-
-	if (rtype == rpcrdma_noch) {
-		*iptr++ = xdr_zero;	/* item not present */
-		return iptr;
-	}
+	int nsegs;

 	pos = rqst->rq_snd_buf.head[0].iov_len;
 	if (rtype == rpcrdma_areadch)
@@ -315,40 +358,33 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
 	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
 				     rtype, seg);
 	if (nsegs < 0)
-		return ERR_PTR(nsegs);
+		return nsegs;

 	do {
-		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
-						 false, &mw);
-		if (n < 0)
-			return ERR_PTR(n);
+		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
+						   false, &mw);
+		if (IS_ERR(seg))
+			return PTR_ERR(seg);
 		rpcrdma_push_mw(mw, &req->rl_registered);

-		*iptr++ = xdr_one;	/* item present */
-
-		/* All read segments in this chunk
-		 * have the same "position".
-		 */
-		*iptr++ = cpu_to_be32(pos);
-		iptr = xdr_encode_rdma_segment(iptr, mw);
+		if (encode_read_segment(xdr, mw, pos) < 0)
+			return -EMSGSIZE;

 		dprintk("RPC: %5u %s: pos %u %u@0x%016llx:0x%08x (%s)\n",
 			rqst->rq_task->tk_pid, __func__, pos,
 			mw->mw_length, (unsigned long long)mw->mw_offset,
-			mw->mw_handle, n < nsegs ? "more" : "last");
+			mw->mw_handle, mw->mw_nents < nsegs ? "more" : "last");

 		r_xprt->rx_stats.read_chunk_count++;
-		seg += n;
-		nsegs -= n;
+		nsegs -= mw->mw_nents;
 	} while (nsegs);

-	/* Finish Read list */
-	*iptr++ = xdr_zero;	/* Next item not present */
-	return iptr;
+	return 0;
 }
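Because ro_map() may register fewer SGEs than it is offered, one Read chunk can span several MRs, and the loop above emits a separate "1 - P - HLOO" item for each of them, all carrying the same position. A toy model of that chunking, where the per-MR limit of 8 is an assumed stand-in for the registration mode's real cap:

    #include <stdio.h>

    int main(void)
    {
            int nsegs = 20;         /* SGEs from rpcrdma_convert_iovs() */
            int max_per_mr = 8;     /* assumed registration limit */
            int mr = 0;

            while (nsegs) {
                    int n = nsegs < max_per_mr ? nsegs : max_per_mr;

                    /* one MR becomes one read segment on the wire */
                    printf("MR %d covers %d SGEs (%s)\n",
                           mr++, n, n < nsegs ? "more" : "last");
                    nsegs -= n;
            }
            return 0;
    }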
-/* XDR-encode the Write list. Supports encoding a list containing
- * one array of plain segments that belong to a single write chunk.
+/* Register and XDR encode the Write list. Supports encoding a list
+ * containing one array of plain segments that belong to a single
+ * write chunk.
  *
  * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
  *
@@ -356,66 +392,65 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
  *  N elements:
  *  1 - N - HLOO - HLOO - ... - HLOO - 0
  *
- * Returns a pointer to the XDR word in the RDMA header following
- * the end of the Write list, or an error pointer.
+ * Returns zero on success, or a negative errno if a failure occurred.
+ * @xdr is advanced to the next position in the stream.
+ *
+ * Only a single Write chunk is currently supported.
  */
-static __be32 *
+static noinline int
 rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
-			  struct rpc_rqst *rqst, __be32 *iptr,
-			  enum rpcrdma_chunktype wtype)
+			  struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
 {
+	struct xdr_stream *xdr = &req->rl_stream;
 	struct rpcrdma_mr_seg *seg;
 	struct rpcrdma_mw *mw;
-	int n, nsegs, nchunks;
+	int nsegs, nchunks;
 	__be32 *segcount;

-	if (wtype != rpcrdma_writech) {
-		*iptr++ = xdr_zero;	/* no Write list present */
-		return iptr;
-	}
-
 	seg = req->rl_segments;
 	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
 				     rqst->rq_rcv_buf.head[0].iov_len,
 				     wtype, seg);
 	if (nsegs < 0)
-		return ERR_PTR(nsegs);
+		return nsegs;

-	*iptr++ = xdr_one;	/* Write list present */
-	segcount = iptr++;	/* save location of segment count */
+	if (encode_item_present(xdr) < 0)
+		return -EMSGSIZE;
+	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
+	if (unlikely(!segcount))
+		return -EMSGSIZE;
+	/* Actual value encoded below */

 	nchunks = 0;
 	do {
-		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
-						 true, &mw);
-		if (n < 0)
-			return ERR_PTR(n);
+		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
+						   true, &mw);
+		if (IS_ERR(seg))
+			return PTR_ERR(seg);
 		rpcrdma_push_mw(mw, &req->rl_registered);

-		iptr = xdr_encode_rdma_segment(iptr, mw);
+		if (encode_rdma_segment(xdr, mw) < 0)
+			return -EMSGSIZE;

dprintk("RPC: %5u %s: %u@0x016%llx:0x%08x (%s)\n",
 			rqst->rq_task->tk_pid, __func__,
 			mw->mw_length, (unsigned long long)mw->mw_offset,
-			mw->mw_handle, n < nsegs ? "more" : "last");
+			mw->mw_handle, mw->mw_nents < nsegs ? "more" : "last");

 		r_xprt->rx_stats.write_chunk_count++;
 		r_xprt->rx_stats.total_rdma_request += seg->mr_len;
 		nchunks++;
-		seg += n;
-		nsegs -= n;
+		nsegs -= mw->mw_nents;
 	} while (nsegs);

 	/* Update count of segments in this Write chunk */
 	*segcount = cpu_to_be32(nchunks);

-	/* Finish Write list */
-	*iptr++ = xdr_zero;	/* Next item not present */
-	return iptr;
+	return 0;
 }
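The segment count is not known until registration finishes, so the function reserves the count's quad before the loop and back-fills it afterward; xdr_reserve_space() hands back a pointer that remains valid for exactly this purpose. A minimal userspace sketch of the same reserve-then-backfill idiom, with invented handles and a flat array standing in for the xdr_stream:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
            uint32_t stream[16] = { 0 };
            uint32_t *out = stream;
            uint32_t *segcount;
            int nchunks = 0;

            *out++ = htonl(1);      /* Write list item present */
            segcount = out++;       /* reserved; actual value below */

            for (int i = 0; i < 3; i++) {  /* pretend ro_map() ran 3 times */
                    *out++ = htonl(0x100 + i);      /* handle */
                    *out++ = htonl(4096);           /* length */
                    out += 2;                       /* offset quads */
                    nchunks++;
            }
            *segcount = htonl(nchunks);     /* back-fill the count */

            printf("%d segments in %ld quads\n",
                   nchunks, (long)(out - stream));
            return 0;
    }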
-/* XDR-encode the Reply chunk. Supports encoding an array of plain
- * segments that belong to a single write (reply) chunk.
+/* Register and XDR encode the Reply chunk. Supports encoding an array
+ * of plain segments that belong to a single write (reply) chunk.
  *
  * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
  *
@@ -423,58 +458,57 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
  *  N elements:
  *  1 - N - HLOO - HLOO - ... - HLOO
  *
- * Returns a pointer to the XDR word in the RDMA header following
- * the end of the Reply chunk, or an error pointer.
+ * Returns zero on success, or a negative errno if a failure occurred.
+ * @xdr is advanced to the next position in the stream.
  */
-static __be32 *
-rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
-			   struct rpcrdma_req *req, struct rpc_rqst *rqst,
-			   __be32 *iptr, enum rpcrdma_chunktype wtype)
+static noinline int
+rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
+			   struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
 {
+	struct xdr_stream *xdr = &req->rl_stream;
 	struct rpcrdma_mr_seg *seg;
 	struct rpcrdma_mw *mw;
-	int n, nsegs, nchunks;
+	int nsegs, nchunks;
 	__be32 *segcount;

-	if (wtype != rpcrdma_replych) {
-		*iptr++ = xdr_zero;	/* no Reply chunk present */
-		return iptr;
-	}
-
 	seg = req->rl_segments;
 	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
 	if (nsegs < 0)
-		return ERR_PTR(nsegs);
+		return nsegs;

-	*iptr++ = xdr_one;	/* Reply chunk present */
-	segcount = iptr++;	/* save location of segment count */
+	if (encode_item_present(xdr) < 0)
+		return -EMSGSIZE;
+	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
+	if (unlikely(!segcount))
+		return -EMSGSIZE;
+	/* Actual value encoded below */

 	nchunks = 0;
 	do {
-		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
-						 true, &mw);
-		if (n < 0)
-			return ERR_PTR(n);
+		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
+						   true, &mw);
+		if (IS_ERR(seg))
+			return PTR_ERR(seg);
 		rpcrdma_push_mw(mw, &req->rl_registered);

-		iptr = xdr_encode_rdma_segment(iptr, mw);
+		if (encode_rdma_segment(xdr, mw) < 0)
+			return -EMSGSIZE;

 		dprintk("RPC: %5u %s: %u@0x%016llx:0x%08x (%s)\n",
 			rqst->rq_task->tk_pid, __func__,
 			mw->mw_length, (unsigned long long)mw->mw_offset,
-			mw->mw_handle, n < nsegs ? "more" : "last");
+			mw->mw_handle, mw->mw_nents < nsegs ? "more" : "last");

 		r_xprt->rx_stats.reply_chunk_count++;
 		r_xprt->rx_stats.total_rdma_request += seg->mr_len;
 		nchunks++;
-		seg += n;
-		nsegs -= n;
+		nsegs -= mw->mw_nents;
 	} while (nsegs);

 	/* Update count of segments in the Reply chunk */
 	*segcount = cpu_to_be32(nchunks);

-	return iptr;
+	return 0;
 }

 /* Prepare the RPC-over-RDMA header SGE.
@@ -651,37 +685,52 @@ rpcrdma_unmap_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
 	req->rl_mapped_sges = 0;
 }

-/*
- * Marshal a request: the primary job of this routine is to choose
- * the transfer modes. See comments below.
+/**
+ * rpcrdma_marshal_req - Marshal and send one RPC request
+ * @r_xprt: controlling transport
+ * @rqst: RPC request to be marshaled
+ *
+ * For the RPC in "rqst", this function:
+ *  - Chooses the transfer mode (eg., RDMA_MSG or RDMA_NOMSG)
+ *  - Registers Read, Write, and Reply chunks
+ *  - Constructs the transport header
+ *  - Posts a Send WR to send the transport header and request
  *
- * Returns zero on success, otherwise a negative errno.
+ * Returns:
+ *	%0 if the RPC was sent successfully,
+ *	%-ENOTCONN if the connection was lost,
+ *	%-EAGAIN if not enough pages are available for on-demand reply buffer,
+ *	%-ENOBUFS if no MRs are available to register chunks,
+ *	%-EMSGSIZE if the transport header is too small,
+ *	%-EIO if a permanent problem occurred while marshaling.
  */
-
 int
-rpcrdma_marshal_req(struct rpc_rqst *rqst)
+rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
 {
-	struct rpc_xprt *xprt = rqst->rq_xprt;
-	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
 	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
+	struct xdr_stream *xdr = &req->rl_stream;
 	enum rpcrdma_chunktype rtype, wtype;
-	struct rpcrdma_msg *headerp;
 	bool ddp_allowed;
-	ssize_t hdrlen;
-	size_t rpclen;
-	__be32 *iptr;
+	__be32 *p;
+	int ret;

 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
 	if (test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state))
 		return rpcrdma_bc_marshal_reply(rqst);
 #endif

-	headerp = rdmab_to_msg(req->rl_rdmabuf);
-	/* don't byte-swap XID, it's already done in request */
-	headerp->rm_xid = rqst->rq_xid;
-	headerp->rm_vers = rpcrdma_version;
-	headerp->rm_credit = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);
-	headerp->rm_type = rdma_msg;
+	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
+	xdr_init_encode(xdr, &req->rl_hdrbuf,
+			req->rl_rdmabuf->rg_base);
+
+	/* Fixed header fields */
+	ret = -EMSGSIZE;
+	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
+	if (!p)
+		goto out_err;
+	*p++ = rqst->rq_xid;
+	*p++ = rpcrdma_version;
+	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);

 	/* When the ULP employs a GSS flavor that guarantees integrity
 	 * or privacy, direct data placement of individual data items
@@ -721,22 +770,17 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	 * by themselves are larger than the inline threshold.
 	 */
 	if (rpcrdma_args_inline(r_xprt, rqst)) {
+		*p++ = rdma_msg;
 		rtype = rpcrdma_noch;
-		rpclen = rqst->rq_snd_buf.len;
 	} else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
+		*p++ = rdma_msg;
 		rtype = rpcrdma_readch;
-		rpclen = rqst->rq_snd_buf.head[0].iov_len +
-			 rqst->rq_snd_buf.tail[0].iov_len;
 	} else {
 		r_xprt->rx_stats.nomsg_call_count++;
-		headerp->rm_type = htonl(RDMA_NOMSG);
+		*p++ = rdma_nomsg;
 		rtype = rpcrdma_areadch;
-		rpclen = 0;
 	}

-	req->rl_xid = rqst->rq_xid;
-	rpcrdma_insert_req(&r_xprt->rx_buf, req);
-
 	/* This implementation supports the following combinations
 	 * of chunk lists in one RPC-over-RDMA Call message:
 	 *
@@ -759,79 +803,50 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	 * send a Call message with a Position Zero Read chunk and a
 	 * regular Read chunk at the same time.
 	 */
-	iptr = headerp->rm_body.rm_chunks;
-	iptr = rpcrdma_encode_read_list(r_xprt, req, rqst, iptr, rtype);
-	if (IS_ERR(iptr))
+	if (rtype != rpcrdma_noch) {
+		ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
+		if (ret)
+			goto out_err;
+	}
+	ret = encode_item_not_present(xdr);
+	if (ret)
 		goto out_err;
-	iptr = rpcrdma_encode_write_list(r_xprt, req, rqst, iptr, wtype);
-	if (IS_ERR(iptr))
+
+	if (wtype == rpcrdma_writech) {
+		ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
+		if (ret)
+			goto out_err;
+	}
+	ret = encode_item_not_present(xdr);
+	if (ret)
 		goto out_err;
-	iptr = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, iptr, wtype);
-	if (IS_ERR(iptr))
+
+	if (wtype != rpcrdma_replych)
+		ret = encode_item_not_present(xdr);
+	else
+		ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
+	if (ret)
 		goto out_err;
-	hdrlen = (unsigned char *)iptr - (unsigned char *)headerp;

-	dprintk("RPC: %5u %s: %s/%s: hdrlen %zd rpclen %zd\n",
+	dprintk("RPC: %5u %s: %s/%s: hdrlen %u\n",
 		rqst->rq_task->tk_pid, __func__,
 		transfertypes[rtype], transfertypes[wtype],
-		hdrlen, rpclen);
+		xdr_stream_pos(xdr));

-	if (!rpcrdma_prepare_send_sges(&r_xprt->rx_ia, req, hdrlen,
+	if (!rpcrdma_prepare_send_sges(&r_xprt->rx_ia, req,
+				       xdr_stream_pos(xdr),
 				       &rqst->rq_snd_buf, rtype)) {
-		iptr = ERR_PTR(-EIO);
+		ret = -EIO;
 		goto out_err;
 	}
 	return 0;

 out_err:
-	if (PTR_ERR(iptr) != -ENOBUFS) {
-		pr_err("rpcrdma: rpcrdma_marshal_req failed, status %ld\n",
-		       PTR_ERR(iptr));
+	if (ret != -ENOBUFS) {
+		pr_err("rpcrdma: header marshaling failed (%d)\n", ret);
 		r_xprt->rx_stats.failed_marshal_count++;
 	}
-	return PTR_ERR(iptr);
-}
-
-/*
- * Chase down a received write or reply chunklist to get length
- * RDMA'd by server. See map at rpcrdma_create_chunks()! :-)
- */
-static int
-rpcrdma_count_chunks(struct rpcrdma_rep *rep, int wrchunk, __be32 **iptrp)
-{
-	unsigned int i, total_len;
-	struct rpcrdma_write_chunk *cur_wchunk;
-	char *base = (char *)rdmab_to_msg(rep->rr_rdmabuf);
-
-	i = be32_to_cpu(**iptrp);
-	cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
-	total_len = 0;
-	while (i--) {
-		struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
-		ifdebug(FACILITY) {
-			u64 off;
-			xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
-			dprintk("RPC: %s: chunk %d@0x%016llx:0x%08x\n",
-				__func__,
-				be32_to_cpu(seg->rs_length),
-				(unsigned long long)off,
-				be32_to_cpu(seg->rs_handle));
-		}
-		total_len += be32_to_cpu(seg->rs_length);
-		++cur_wchunk;
-	}
-	/* check and adjust for properly terminated write chunk */
-	if (wrchunk) {
-		__be32 *w = (__be32 *) cur_wchunk;
-		if (*w++ != xdr_zero)
-			return -1;
-		cur_wchunk = (struct rpcrdma_write_chunk *) w;
-	}
-	if ((char *)cur_wchunk > base + rep->rr_len)
-		return -1;
-
-	*iptrp = (__be32 *) cur_wchunk;
-	return total_len;
+	return ret;
 }
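After this conversion every Call header begins with four fixed quads and then the Read list, Write list, and Reply chunk positions in that order, each either a chunk body or a single "not present" quad. A sketch of the smallest case, a pure inline call, which is also how the 28-byte minimal header size falls out; the XID and credit values are made up:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
            uint32_t hdr[7];

            hdr[0] = htonl(0x01020304);     /* XID (invented) */
            hdr[1] = htonl(1);              /* RPC-over-RDMA version one */
            hdr[2] = htonl(128);            /* credits (invented) */
            hdr[3] = htonl(0);              /* proc: RDMA_MSG */
            hdr[4] = 0;                     /* Read list not present */
            hdr[5] = 0;                     /* Write list not present */
            hdr[6] = 0;                     /* Reply chunk not present */

            printf("header: %zu bytes\n", sizeof(hdr));     /* 28 */
            return 0;
    }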
 /**
@@ -949,37 +964,254 @@ rpcrdma_mark_remote_invalidation(struct list_head *mws,
 	}
 }

-#if defined(CONFIG_SUNRPC_BACKCHANNEL)
 /* By convention, backchannel calls arrive via rdma_msg type
  * messages, and never populate the chunk lists. This makes
  * the RPC/RDMA header small and fixed in size, so it is
  * straightforward to check the RPC header's direction field.
  */
 static bool
-rpcrdma_is_bcall(struct rpcrdma_msg *headerp)
+rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
+		 __be32 xid, __be32 proc)
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
 {
-	__be32 *p = (__be32 *)headerp;
+	struct xdr_stream *xdr = &rep->rr_stream;
+	__be32 *p;

-	if (headerp->rm_type != rdma_msg)
+	if (proc != rdma_msg)
 		return false;
-	if (headerp->rm_body.rm_chunks[0] != xdr_zero)
+
+	/* Peek at stream contents without advancing. */
+	p = xdr_inline_decode(xdr, 0);
+
+	/* Chunk lists */
+	if (*p++ != xdr_zero)
 		return false;
-	if (headerp->rm_body.rm_chunks[1] != xdr_zero)
+	if (*p++ != xdr_zero)
 		return false;
-	if (headerp->rm_body.rm_chunks[2] != xdr_zero)
+	if (*p++ != xdr_zero)
 		return false;

-	/* sanity */
-	if (p[7] != headerp->rm_xid)
+	/* RPC header */
+	if (*p++ != xid)
 		return false;
-	/* call direction */
-	if (p[8] != cpu_to_be32(RPC_CALL))
+	if (*p != cpu_to_be32(RPC_CALL))
 		return false;

+	/* Now that we are sure this is a backchannel call,
+	 * advance to the RPC header.
+	 */
+	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
+	if (unlikely(!p))
+		goto out_short;
+
+	rpcrdma_bc_receive_call(r_xprt, rep);
+	return true;
+
+out_short:
+	pr_warn("RPC/RDMA short backward direction call\n");
+	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
+		xprt_disconnect_done(&r_xprt->rx_xprt);
 	return true;
 }
+#else	/* CONFIG_SUNRPC_BACKCHANNEL */
+{
+	return false;
+}
 #endif	/* CONFIG_SUNRPC_BACKCHANNEL */
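The test above leans on xdr_inline_decode(xdr, 0) returning the current decode position without consuming anything, so the header can be inspected before the stream is committed. A small userspace model of that peek-then-consume idiom; the stream struct and helpers are stand-ins, not the kernel's xdr_stream API:

    #include <stdio.h>
    #include <stdint.h>

    struct stream { uint32_t *p; unsigned int quads; };

    /* like xdr_inline_decode(xdr, 0): look, do not consume */
    static uint32_t *peek(struct stream *s) { return s->p; }

    /* like xdr_inline_decode(xdr, n * 4): consume n quads */
    static uint32_t *consume(struct stream *s, unsigned int n)
    {
            uint32_t *p = s->p;

            if (n > s->quads)
                    return NULL;
            s->p += n;
            s->quads -= n;
            return p;
    }

    int main(void)
    {
            /* three empty chunk lists, then the RPC header */
            uint32_t buf[] = { 0, 0, 0, 0x1234 /* xid */, 0 /* CALL */ };
            struct stream s = { buf, 5 };
            uint32_t *p = peek(&s);

            if (p[0] == 0 && p[1] == 0 && p[2] == 0) {
                    consume(&s, 3); /* now positioned at the RPC header */
                    printf("backchannel candidate, xid 0x%x\n", *peek(&s));
            }
            return 0;
    }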
+static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4 * sizeof(*p));
+	if (unlikely(!p))
+		return -EIO;
+
+	ifdebug(FACILITY) {
+		u64 offset;
+		u32 handle;
+
+		handle = be32_to_cpup(p++);
+		*length = be32_to_cpup(p++);
+		xdr_decode_hyper(p, &offset);
+		dprintk("RPC: %s: segment %u@0x%016llx:0x%08x\n",
+			__func__, *length, (unsigned long long)offset,
+			handle);
+	} else {
+		*length = be32_to_cpup(p + 1);
+	}
+
+	return 0;
+}
+
+static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
+{
+	u32 segcount, seglength;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, sizeof(*p));
+	if (unlikely(!p))
+		return -EIO;
+
+	*length = 0;
+	segcount = be32_to_cpup(p);
+	while (segcount--) {
+		if (decode_rdma_segment(xdr, &seglength))
+			return -EIO;
+		*length += seglength;
+	}
+
+	dprintk("RPC: %s: segcount=%u, %u bytes\n",
+		__func__, be32_to_cpup(p), *length);
+	return 0;
+}
+
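A quick size check on this decoder: after the list discriminator, a Write chunk carrying N segments occupies 1 + 4N quads, which bounds how much data the xdr_inline_decode() calls above will step through. For example:

    #include <stdio.h>

    int main(void)
    {
            unsigned int nsegs = 2;
            unsigned int quads = 1 + 4 * nsegs;     /* segcount + N * HLOO */

            printf("%u quads = %u bytes\n", quads, quads * 4);  /* 9 = 36 */
            return 0;
    }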
+/* In RPC-over-RDMA Version One replies, a Read list is never
+ * expected. This decoder is a stub that returns an error if
+ * a Read list is present.
+ */
+static int decode_read_list(struct xdr_stream *xdr)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, sizeof(*p));
+	if (unlikely(!p))
+		return -EIO;
+	if (unlikely(*p != xdr_zero))
+		return -EIO;
+	return 0;
+}
+
+/* Supports only one Write chunk in the Write list
+ */
+static int decode_write_list(struct xdr_stream *xdr, u32 *length)
+{
+	u32 chunklen;
+	bool first;
+	__be32 *p;
+
+	*length = 0;
+	first = true;
+	do {
+		p = xdr_inline_decode(xdr, sizeof(*p));
+		if (unlikely(!p))
+			return -EIO;
+		if (*p == xdr_zero)
+			break;
+		if (!first)
+			return -EIO;
+
+		if (decode_write_chunk(xdr, &chunklen))
+			return -EIO;
+		*length += chunklen;
+		first = false;
+	} while (true);
+	return 0;
+}
+
+static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, sizeof(*p));
+	if (unlikely(!p))
+		return -EIO;
+
+	*length = 0;
+	if (*p != xdr_zero)
+		if (decode_write_chunk(xdr, length))
+			return -EIO;
+	return 0;
+}
+
+static int
+rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
+		   struct rpc_rqst *rqst)
+{
+	struct xdr_stream *xdr = &rep->rr_stream;
+	u32 writelist, replychunk, rpclen;
+	char *base;
+
+	/* Decode the chunk lists */
+	if (decode_read_list(xdr))
+		return -EIO;
+	if (decode_write_list(xdr, &writelist))
+		return -EIO;
+	if (decode_reply_chunk(xdr, &replychunk))
+		return -EIO;
+
+	/* RDMA_MSG sanity checks */
+	if (unlikely(replychunk))
+		return -EIO;
+
+	/* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
+	base = (char *)xdr_inline_decode(xdr, 0);
+	rpclen = xdr_stream_remaining(xdr);
+	r_xprt->rx_stats.fixup_copy_count +=
+		rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);
+
+	r_xprt->rx_stats.total_rdma_reply += writelist;
+	return rpclen + xdr_align_size(writelist);
+}
+
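The value computed here becomes the reply length passed to xprt_complete_rqst(): the RPC bytes still inline plus the Write list payload rounded up to the XDR quad boundary. A worked example with invented lengths:

    #include <stdio.h>

    /* same rounding that xdr_align_size() performs */
    static unsigned int xdr_align(unsigned int n)
    {
            return (n + 3) & ~3u;
    }

    int main(void)
    {
            unsigned int rpclen = 140;      /* inline reply bytes */
            unsigned int writelist = 8193;  /* bytes moved by RDMA Write */

            printf("status = %u\n", rpclen + xdr_align(writelist)); /* 8336 */
            return 0;
    }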
+static noinline int
+rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
+{
+	struct xdr_stream *xdr = &rep->rr_stream;
+	u32 writelist, replychunk;
+
+	/* Decode the chunk lists */
+	if (decode_read_list(xdr))
+		return -EIO;
+	if (decode_write_list(xdr, &writelist))
+		return -EIO;
+	if (decode_reply_chunk(xdr, &replychunk))
+		return -EIO;
+
+	/* RDMA_NOMSG sanity checks */
+	if (unlikely(writelist))
+		return -EIO;
+	if (unlikely(!replychunk))
+		return -EIO;
+
+	/* Reply chunk buffer already is the reply vector */
+	r_xprt->rx_stats.total_rdma_reply += replychunk;
+	return replychunk;
+}
+
+static noinline int
+rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
+		     struct rpc_rqst *rqst)
+{
+	struct xdr_stream *xdr = &rep->rr_stream;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, sizeof(*p));
+	if (unlikely(!p))
+		return -EIO;
+
+	switch (*p) {
+	case err_vers:
+		p = xdr_inline_decode(xdr, 2 * sizeof(*p));
+		if (!p)
+			break;
+		dprintk("RPC: %5u: %s: server reports version error (%u-%u)\n",
+			rqst->rq_task->tk_pid, __func__,
+			be32_to_cpup(p), be32_to_cpup(p + 1));
+		break;
+	case err_chunk:
+		dprintk("RPC: %5u: %s: server reports header decoding error\n",
+			rqst->rq_task->tk_pid, __func__);
+		break;
+	default:
+		dprintk("RPC: %5u: %s: server reports unrecognized error %d\n",
+			rqst->rq_task->tk_pid, __func__, be32_to_cpup(p));
+	}
+
+	r_xprt->rx_stats.bad_reply_count++;
+	return -EREMOTEIO;
+}
+
 /* Process received RPC/RDMA messages.
  *
  * Errors must result in the RPC task either being awakened, or
@@ -991,51 +1223,48 @@ rpcrdma_reply_handler(struct work_struct *work)
 	struct rpcrdma_rep *rep =
 			container_of(work, struct rpcrdma_rep, rr_work);
 	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
-	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
 	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
-	struct rpcrdma_msg *headerp;
+	struct xdr_stream *xdr = &rep->rr_stream;
 	struct rpcrdma_req *req;
 	struct rpc_rqst *rqst;
-	__be32 *iptr;
-	int rdmalen, status, rmerr;
+	__be32 *p, xid, vers, proc;
 	unsigned long cwnd;
-	struct list_head mws;
+	int status;

 	dprintk("RPC: %s: incoming rep %p\n", __func__, rep);

-	if (rep->rr_len == RPCRDMA_BAD_LEN)
+	if (rep->rr_hdrbuf.head[0].iov_len == 0)
 		goto out_badstatus;
-	if (rep->rr_len < RPCRDMA_HDRLEN_ERR)
+
+	xdr_init_decode(xdr, &rep->rr_hdrbuf,
+			rep->rr_hdrbuf.head[0].iov_base);
+
+	/* Fixed transport header fields */
+	p = xdr_inline_decode(xdr, 4 * sizeof(*p));
+	if (unlikely(!p))
 		goto out_shortreply;
+	xid = *p++;
+	vers = *p++;
+	p++;	/* credits */
+	proc = *p++;

-	headerp = rdmab_to_msg(rep->rr_rdmabuf);
-#if defined(CONFIG_SUNRPC_BACKCHANNEL)
-	if (rpcrdma_is_bcall(headerp))
-		goto out_bcall;
-#endif
+	if (rpcrdma_is_bcall(r_xprt, rep, xid, proc))
+		return;

 	/* Match incoming rpcrdma_rep to an rpcrdma_req to
 	 * get context for handling any incoming chunks.
 	 */
-	spin_lock(&buf->rb_lock);
-	req = rpcrdma_lookup_req_locked(&r_xprt->rx_buf,
-					headerp->rm_xid);
-	if (!req)
-		goto out_nomatch;
-	if (req->rl_reply)
-		goto out_duplicate;
-
-	list_replace_init(&req->rl_registered, &mws);
-	rpcrdma_mark_remote_invalidation(&mws, rep);
-
-	/* Avoid races with signals and duplicate replies
-	 * by marking this req as matched.
-	 */
+	spin_lock(&xprt->recv_lock);
+	rqst = xprt_lookup_rqst(xprt, xid);
+	if (!rqst)
+		goto out_norqst;
+	xprt_pin_rqst(rqst);
+	spin_unlock(&xprt->recv_lock);
+	req = rpcr_to_rdmar(rqst);
 	req->rl_reply = rep;
-	spin_unlock(&buf->rb_lock);

 	dprintk("RPC: %s: reply %p completes request %p (xid 0x%08x)\n",
-		__func__, rep, req, be32_to_cpu(headerp->rm_xid));
+		__func__, rep, req, be32_to_cpu(xid));

 	/* Invalidate and unmap the data payloads before waking the
 	 * waiting application. This guarantees the memory regions
@@ -1044,99 +1273,42 @@ rpcrdma_reply_handler(struct work_struct *work)
 	 * waking the next RPC waits until this RPC has relinquished
 	 * all its Send Queue entries.
 	 */
-	if (!list_empty(&mws))
-		r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, &mws);
+	if (!list_empty(&req->rl_registered)) {
+		rpcrdma_mark_remote_invalidation(&req->rl_registered, rep);
+		r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt,
+						    &req->rl_registered);
+	}

-	/* Perform XID lookup, reconstruction of the RPC reply, and
-	 * RPC completion while holding the transport lock to ensure
-	 * the rep, rqst, and rq_task pointers remain stable.
-	 */
-	spin_lock_bh(&xprt->transport_lock);
-	rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
-	if (!rqst)
-		goto out_norqst;
 	xprt->reestablish_timeout = 0;
-	if (headerp->rm_vers != rpcrdma_version)
+	if (vers != rpcrdma_version)
 		goto out_badversion;

-	/* check for expected message types */
-	/* The order of some of these tests is important. */
-	switch (headerp->rm_type) {
+	switch (proc) {
 	case rdma_msg:
-		/* never expect read chunks */
-		/* never expect reply chunks (two ways to check) */
-		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
-		    (headerp->rm_body.rm_chunks[1] == xdr_zero &&
-		     headerp->rm_body.rm_chunks[2] != xdr_zero))
-			goto badheader;
-		if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
-			/* count any expected write chunks in read reply */
-			/* start at write chunk array count */
-			iptr = &headerp->rm_body.rm_chunks[2];
-			rdmalen = rpcrdma_count_chunks(rep, 1, &iptr);
-			/* check for validity, and no reply chunk after */
-			if (rdmalen < 0 || *iptr++ != xdr_zero)
-				goto badheader;
-			rep->rr_len -=
-			    ((unsigned char *)iptr - (unsigned char *)headerp);
-			status = rep->rr_len + rdmalen;
-			r_xprt->rx_stats.total_rdma_reply += rdmalen;
-			/* special case - last chunk may omit padding */
-			if (rdmalen &= 3) {
-				rdmalen = 4 - rdmalen;
-				status += rdmalen;
-			}
-		} else {
-			/* else ordinary inline */
-			rdmalen = 0;
-			iptr = (__be32 *)((unsigned char *)headerp +
-							RPCRDMA_HDRLEN_MIN);
-			rep->rr_len -= RPCRDMA_HDRLEN_MIN;
-			status = rep->rr_len;
-		}
-
-		r_xprt->rx_stats.fixup_copy_count +=
-			rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len,
-					     rdmalen);
+		status = rpcrdma_decode_msg(r_xprt, rep, rqst);
 		break;
-
 	case rdma_nomsg:
-		/* never expect read or write chunks, always reply chunks */
-		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
-		    headerp->rm_body.rm_chunks[1] != xdr_zero ||
-		    headerp->rm_body.rm_chunks[2] != xdr_one)
-			goto badheader;
-		iptr = (__be32 *)((unsigned char *)headerp +
-							RPCRDMA_HDRLEN_MIN);
-		rdmalen = rpcrdma_count_chunks(rep, 0, &iptr);
-		if (rdmalen < 0)
-			goto badheader;
-		r_xprt->rx_stats.total_rdma_reply += rdmalen;
-		/* Reply chunk buffer already is the reply vector - no fixup. */
-		status = rdmalen;
+		status = rpcrdma_decode_nomsg(r_xprt, rep);
 		break;
-
 	case rdma_error:
-		goto out_rdmaerr;
-
-badheader:
+		status = rpcrdma_decode_error(r_xprt, rep, rqst);
+		break;
 	default:
-		dprintk("RPC: %5u %s: invalid rpcrdma reply (type %u)\n",
-			rqst->rq_task->tk_pid, __func__,
-			be32_to_cpu(headerp->rm_type));
 		status = -EIO;
-		r_xprt->rx_stats.bad_reply_count++;
-		break;
 	}
+	if (status < 0)
+		goto out_badheader;

 out:
+	spin_lock(&xprt->recv_lock);
 	cwnd = xprt->cwnd;
 	xprt->cwnd = atomic_read(&r_xprt->rx_buf.rb_credits) << RPC_CWNDSHIFT;
 	if (xprt->cwnd > cwnd)
 		xprt_release_rqst_cong(rqst->rq_task);

 	xprt_complete_rqst(rqst->rq_task, status);
-	spin_unlock_bh(&xprt->transport_lock);
+	xprt_unpin_rqst(rqst);
+	spin_unlock(&xprt->recv_lock);
 	dprintk("RPC: %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
 		__func__, xprt, rqst, status);
 	return;
@@ -1149,72 +1321,38 @@ out_badstatus:
 	}
 	return;

-#if defined(CONFIG_SUNRPC_BACKCHANNEL)
-out_bcall:
-	rpcrdma_bc_receive_call(r_xprt, rep);
-	return;
-#endif
-
 /* If the incoming reply terminated a pending RPC, the next
  * RPC call will post a replacement receive buffer as it is
  * being marshaled.
  */
 out_badversion:
 	dprintk("RPC: %s: invalid version %d\n",
-		__func__, be32_to_cpu(headerp->rm_vers));
+		__func__, be32_to_cpu(vers));
 	status = -EIO;
 	r_xprt->rx_stats.bad_reply_count++;
 	goto out;

-out_rdmaerr:
-	rmerr = be32_to_cpu(headerp->rm_body.rm_error.rm_err);
-	switch (rmerr) {
-	case ERR_VERS:
-		pr_err("%s: server reports header version error (%u-%u)\n",
-		       __func__,
-		       be32_to_cpu(headerp->rm_body.rm_error.rm_vers_low),
-		       be32_to_cpu(headerp->rm_body.rm_error.rm_vers_high));
-		break;
-	case ERR_CHUNK:
-		pr_err("%s: server reports header decoding error\n",
-		       __func__);
-		break;
-	default:
-		pr_err("%s: server reports unknown error %d\n",
-		       __func__, rmerr);
-	}
-	status = -EREMOTEIO;
+out_badheader:
+	dprintk("RPC: %5u %s: invalid rpcrdma reply (type %u)\n",
+		rqst->rq_task->tk_pid, __func__, be32_to_cpu(proc));
 	r_xprt->rx_stats.bad_reply_count++;
+	status = -EIO;
 	goto out;

-/* The req was still available, but by the time the transport_lock
+/* The req was still available, but by the time the recv_lock
  * was acquired, the rqst and task had been released. Thus the RPC
  * has already been terminated.
  */
 out_norqst:
-	spin_unlock_bh(&xprt->transport_lock);
-	rpcrdma_buffer_put(req);
-	dprintk("RPC: %s: race, no rqst left for req %p\n",
-		__func__, req);
-	return;
+	spin_unlock(&xprt->recv_lock);
+	dprintk("RPC: %s: no match for incoming xid 0x%08x\n",
+		__func__, be32_to_cpu(xid));
+	goto repost;

 out_shortreply:
 	dprintk("RPC: %s: short/invalid reply\n", __func__);
 	goto repost;

-out_nomatch:
-	spin_unlock(&buf->rb_lock);
-	dprintk("RPC: %s: no match for incoming xid 0x%08x len %d\n",
-		__func__, be32_to_cpu(headerp->rm_xid),
-		rep->rr_len);
-	goto repost;
-
-out_duplicate:
-	spin_unlock(&buf->rb_lock);
-	dprintk("RPC: %s: "
-		"duplicate reply %p to RPC request %p: xid 0x%08x\n",
-		__func__, rep, req, be32_to_cpu(headerp->rm_xid));
-
 /* If no pending RPC transaction was matched, post a replacement
  * receive buffer before returning.
  */