@@ -43,6 +43,7 @@
 #include <linux/sunrpc/debug.h>
 #include <linux/sunrpc/rpc_rdma.h>
 #include <linux/spinlock.h>
+#include <linux/highmem.h>
 #include <asm/unaligned.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/rdma_cm.h>
@@ -435,6 +436,32 @@ static int rdma_read_chunks(struct svcxprt_rdma *xprt,
 	return ret;
 }
 
+/*
+ * To avoid a separate RDMA READ just for a handful of zero bytes,
+ * RFC 5666 section 3.7 allows the client to omit the XDR zero pad
+ * in chunk lists.
+ */
+static void
+rdma_fix_xdr_pad(struct xdr_buf *buf)
+{
+	unsigned int page_len = buf->page_len;
+	unsigned int size = (XDR_QUADLEN(page_len) << 2) - page_len;
+	unsigned int offset, pg_no;
+	char *p;
+
+	if (size == 0)
+		return;
+
+	pg_no = page_len >> PAGE_SHIFT;
+	offset = page_len & ~PAGE_MASK;
+	p = page_address(buf->pages[pg_no]);
+	memset(p + offset, 0, size);
+
+	buf->page_len += size;
+	buf->buflen += size;
+	buf->len += size;
+}
+
 static int rdma_read_complete(struct svc_rqst *rqstp,
 			      struct svc_rdma_op_ctxt *head)
 {
@@ -449,6 +476,7 @@ static int rdma_read_complete(struct svc_rqst *rqstp,
 		rqstp->rq_pages[page_no] = head->pages[page_no];
 	}
 	/* Point rq_arg.pages past header */
+	rdma_fix_xdr_pad(&head->arg);
 	rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count];
 	rqstp->rq_arg.page_len = head->arg.page_len;
 	rqstp->rq_arg.page_base = head->arg.page_base;
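
A minimal user-space sketch (not part of the patch) of the arithmetic rdma_fix_xdr_pad() relies on: XDR encodes data in 4-byte quads, so the omitted zero pad is the difference between page_len rounded up to a quad boundary and page_len itself (0 to 3 bytes), and it lands in page page_len >> PAGE_SHIFT at offset page_len & ~PAGE_MASK. XDR_QUADLEN, PAGE_SHIFT, PAGE_SIZE and PAGE_MASK below are local stand-ins with the usual definitions (4096-byte pages assumed); the kernel code uses the real ones from linux/sunrpc/xdr.h and the architecture headers.

/* Sketch only: pad size and location, as computed by rdma_fix_xdr_pad(). */
#include <stdio.h>

#define XDR_QUADLEN(l)	(((l) + 3) >> 2)	/* bytes rounded up to 4-byte quads */
#define PAGE_SHIFT	12			/* assumes 4096-byte pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned int lens[] = { 4096, 4097, 4098, 4099, 4100 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		unsigned int page_len = lens[i];
		unsigned int size = (XDR_QUADLEN(page_len) << 2) - page_len;
		unsigned int pg_no = page_len >> PAGE_SHIFT;
		unsigned int offset = page_len & ~PAGE_MASK;

		/* size == 0 is the early-return case in rdma_fix_xdr_pad() */
		printf("page_len=%u: pad=%u byte(s) at page %u, offset %u\n",
		       page_len, size, pg_no, offset);
	}
	return 0;
}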