@@ -1535,6 +1535,7 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
 	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
 	u8 log_pmtu;
 	int ret;
+	size_t cplen;
 
 	BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE));
 
@@ -1542,32 +1543,11 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
 	if (unlikely(wr->num_sge > qp->s_max_sge))
 		return -EINVAL;
 
-	/*
-	 * Don't allow RDMA reads or atomic operations on UC or
-	 * undefined operations.
-	 * Make sure buffer is large enough to hold the result for atomics.
-	 */
-	if (qp->ibqp.qp_type == IB_QPT_UC) {
-		if ((unsigned)wr->opcode >= IB_WR_RDMA_READ)
-			return -EINVAL;
-	} else if (qp->ibqp.qp_type != IB_QPT_RC) {
-		/* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
-		if (wr->opcode != IB_WR_SEND &&
-		    wr->opcode != IB_WR_SEND_WITH_IMM)
-			return -EINVAL;
-		/* Check UD destination address PD */
-		if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
-			return -EINVAL;
-	} else if ((unsigned)wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
-		return -EINVAL;
-	} else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
-		   (wr->num_sge == 0 ||
-		    wr->sg_list[0].length < sizeof(u64) ||
-		    wr->sg_list[0].addr & (sizeof(u64) - 1))) {
-		return -EINVAL;
-	} else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) {
-		return -EINVAL;
-	}
+	ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
+	if (ret < 0)
+		return ret;
+	cplen = ret;
+
 	/* check for avail */
 	if (unlikely(!qp->s_avail)) {
 		qp->s_avail = qp_get_savail(qp);
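For context, the hunk above swaps the open-coded per-QP-type opcode checks for a lookup in a per-device table (rdi->post_parms), and the helper's non-negative return value doubles as the number of bytes of the work request to copy (cplen). The helper body is not part of this diff; the following is a minimal sketch of how such a table-driven check could work. The struct and field names (rvt_operation_params, length, qpt_support, flags), the num_parms bound, and the EXAMPLE_OP_ATOMIC flag are placeholders assumed for illustration, not necessarily the in-tree definitions.

	/* Placeholder flag: opcode needs the atomic result-buffer checks */
	#define EXAMPLE_OP_ATOMIC	BIT(0)

	struct rvt_operation_params {
		size_t length;		/* bytes of the ib_*_wr variant to copy into the swqe */
		u32 qpt_support;	/* bitmask of QP types allowed to post this opcode */
		u32 flags;		/* opcode attributes driving extra checks */
	};

	static int example_valid_operation(struct rvt_qp *qp,
					   const struct rvt_operation_params *parms,
					   unsigned int num_parms,
					   const struct ib_send_wr *wr)
	{
		const struct rvt_operation_params *p;

		/* Opcode outside the table, or a slot left zeroed: unsupported */
		if (wr->opcode >= num_parms)
			return -EINVAL;
		p = &parms[wr->opcode];
		if (!p->length)
			return -EINVAL;

		/* Opcode not legal on this QP type (replaces the UC/RC/UD if-chain) */
		if (!(p->qpt_support & BIT(qp->ibqp.qp_type)))
			return -EINVAL;

		/* Flag-driven check mirroring the removed atomic SGE validation */
		if ((p->flags & EXAMPLE_OP_ATOMIC) &&
		    (wr->num_sge == 0 ||
		     wr->sg_list[0].length < sizeof(u64) ||
		     wr->sg_list[0].addr & (sizeof(u64) - 1)))
			return -EINVAL;

		/* Success: tell the caller how many bytes of *wr to copy */
		return p->length;
	}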
@@ -1588,18 +1568,8 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
 	pd = ibpd_to_rvtpd(qp->ibqp.pd);
 	wqe = rvt_get_swqe_ptr(qp, qp->s_head);
 
-	if (qp->ibqp.qp_type != IB_QPT_UC &&
-	    qp->ibqp.qp_type != IB_QPT_RC)
-		memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
-	else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
-		 wr->opcode == IB_WR_RDMA_WRITE ||
-		 wr->opcode == IB_WR_RDMA_READ)
-		memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
-	else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
-		 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
-		memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
-	else
-		memcpy(&wqe->wr, wr, sizeof(wqe->wr));
+	/* cplen has length from above */
+	memcpy(&wqe->wr, wr, cplen);
 
 	wqe->length = 0;
 	j = 0;
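To make the cplen-sized copy concrete, a hypothetical rdi->post_parms slot might look like the fragment below; the actual table contents are supplied per driver and are not part of this patch, so the entry is illustrative only.

	/* Hypothetical table slot: RDMA WRITE allowed on RC and UC QPs */
	[IB_WR_RDMA_WRITE] = {
		.length = sizeof(struct ib_rdma_wr),
		.qpt_support = BIT(IB_QPT_RC) | BIT(IB_QPT_UC),
	},

With an entry like this, the validator returns sizeof(struct ib_rdma_wr), so the single memcpy(&wqe->wr, wr, cplen) copies exactly the RDMA variant of the work request, which the removed else-if chain previously selected by hand.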