|
@@ -790,21 +790,57 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
-static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
|
|
|
+static int ib_to_fw_opcode(int ib_opcode) /* translate an IB work-request opcode into the FW_RI_* wire opcode */
|
|
|
+{
|
|
|
+ int opcode;
|
|
|
+
|
|
|
+ switch (ib_opcode) {
|
|
|
+ case IB_WR_SEND_WITH_INV:
|
|
|
+ opcode = FW_RI_SEND_WITH_INV;
|
|
|
+ break;
|
|
|
+ case IB_WR_SEND:
|
|
|
+ opcode = FW_RI_SEND;
|
|
|
+ break;
|
|
|
+ case IB_WR_RDMA_WRITE:
|
|
|
+ opcode = FW_RI_RDMA_WRITE;
|
|
|
+ break;
|
|
|
+ case IB_WR_RDMA_READ:
|
|
|
+ case IB_WR_RDMA_READ_WITH_INV:
|
|
|
+ opcode = FW_RI_READ_REQ; /* both read variants map to one FW read request */
|
|
|
+ break;
|
|
|
+ case IB_WR_REG_MR:
|
|
|
+ opcode = FW_RI_FAST_REGISTER;
|
|
|
+ break;
|
|
|
+ case IB_WR_LOCAL_INV:
|
|
|
+ opcode = FW_RI_LOCAL_INV;
|
|
|
+ break;
|
|
|
+ default:
|
|
|
+ opcode = -EINVAL; /* opcode has no FW equivalent */
|
|
|
+ }
|
|
|
+ return opcode; /* FW_RI_* value, or -EINVAL */
|
|
|
+}
|
|
|
+
|
|
|
+static int complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr) /* returns 0 on success or a negative errno */
|
|
|
{
|
|
|
struct t4_cqe cqe = {};
|
|
|
struct c4iw_cq *schp;
|
|
|
unsigned long flag;
|
|
|
struct t4_cq *cq;
|
|
|
+ int opcode; /* FW opcode recorded in the drain CQE */
|
|
|
|
|
|
schp = to_c4iw_cq(qhp->ibqp.send_cq);
|
|
|
cq = &schp->cq;
|
|
|
|
|
|
+ opcode = ib_to_fw_opcode(wr->opcode); /* map to FW_RI_*; negative if unsupported */
|
|
|
+ if (opcode < 0) /* reject unsupported opcodes before building the CQE */
|
|
|
+ return opcode;
|
|
|
+
|
|
|
cqe.u.drain_cookie = wr->wr_id;
|
|
|
cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
|
|
|
- CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
|
|
|
+ CQE_OPCODE_V(opcode) | /* reflect the original WR opcode instead of a fixed drain opcode */
|
|
|
CQE_TYPE_V(1) |
|
|
|
CQE_SWCQE_V(1) |
|
|
|
+ CQE_DRAIN_V(1) | /* mark this software CQE as a drain completion */
|
|
|
CQE_QPID_V(qhp->wq.sq.qid));
|
|
|
|
|
|
spin_lock_irqsave(&schp->lock, flag);
|
|
@@ -819,6 +855,7 @@ static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
|
|
|
schp->ibcq.cq_context);
|
|
|
spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
|
|
|
}
|
|
|
+ return 0; /* drain CQE queued */
|
|
|
}
|
|
|
|
|
|
static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
|
|
@@ -833,9 +870,10 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
|
|
|
|
|
|
cqe.u.drain_cookie = wr->wr_id;
|
|
|
cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
|
|
|
- CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
|
|
|
+ CQE_OPCODE_V(FW_RI_SEND) | /* RQ drains use a fixed opcode; SQ drains reflect the WR opcode */
|
|
|
CQE_TYPE_V(0) |
|
|
|
CQE_SWCQE_V(1) |
|
|
|
+ CQE_DRAIN_V(1) | /* mark this software CQE as a drain completion */
|
|
|
CQE_QPID_V(qhp->wq.sq.qid));
|
|
|
|
|
|
spin_lock_irqsave(&rchp->lock, flag);
|
|
@@ -875,7 +913,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
|
|
*/
|
|
|
if (qhp->wq.flushed) {
|
|
|
spin_unlock_irqrestore(&qhp->lock, flag);
|
|
|
- complete_sq_drain_wr(qhp, wr);
|
|
|
+ err = complete_sq_drain_wr(qhp, wr); /* now propagates drain failures to the caller */
|
|
|
return err;
|
|
|
}
|
|
|
num_wrs = t4_sq_avail(&qhp->wq);
|