@@ -232,6 +232,9 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
 	struct ib_cqe *cqe;
 	int ret;
 
+	if (cc->cc_sqecount > rdma->sc_sq_depth)
+		return -EINVAL;
+
 	first_wr = NULL;
 	cqe = &cc->cc_cqe;
 	list_for_each(tmp, &cc->cc_rwctxts) {
@@ -425,6 +428,7 @@ static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info,
  *
  * Returns a non-negative number of bytes the chunk consumed, or
  *	%-E2BIG if the payload was larger than the Write chunk,
+ *	%-EINVAL if client provided too many segments,
  *	%-ENOMEM if rdma_rw context pool was exhausted,
  *	%-ENOTCONN if posting failed (connection is lost),
  *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
@@ -465,6 +469,7 @@ out_err:
  *
  * Returns a non-negative number of bytes the chunk consumed, or
  *	%-E2BIG if the payload was larger than the Reply chunk,
+ *	%-EINVAL if client provided too many segments,
  *	%-ENOMEM if rdma_rw context pool was exhausted,
  *	%-ENOTCONN if posting failed (connection is lost),
  *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
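
For context, here is a minimal user-space sketch of the guard the first hunk adds, using hypothetical type and field names (fake_sq, fake_ctxt, sqecount, sq_depth) rather than the kernel's svcrdma structures: a chunk context that would consume more Send Queue entries than the queue can ever hold is rejected up front with -EINVAL instead of being posted.

/* Illustrative sketch only -- hypothetical names, not kernel code. */
#include <errno.h>
#include <stdio.h>

struct fake_sq   { unsigned int sq_depth; };   /* total Send Queue capacity */
struct fake_ctxt { unsigned int sqecount; };   /* SQ entries this I/O needs */

static int post_chunk(const struct fake_sq *sq, const struct fake_ctxt *cc)
{
	/* The guard mirrored from the hunk above: an I/O that needs more
	 * SQ entries than the queue's total depth can never be posted,
	 * so fail it immediately rather than overflowing the Send Queue.
	 */
	if (cc->sqecount > sq->sq_depth)
		return -EINVAL;

	/* ... chain and post the RDMA Write work requests here ... */
	return 0;
}

int main(void)
{
	struct fake_sq sq = { .sq_depth = 128 };
	struct fake_ctxt small = { .sqecount = 16 }, huge = { .sqecount = 4096 };

	printf("small: %d, huge: %d\n",
	       post_chunk(&sq, &small), post_chunk(&sq, &huge));
	return 0;
}

Failing fast here is the point of the change: the oversized request is reported to the caller as -EINVAL, which is why the two documentation hunks add that value to the lists of possible returns.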