@@ -529,6 +529,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
 {
 	struct nvmet_rdma_rsp *rsp =
 		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
+	struct nvmet_rdma_queue *queue = cq->cq_context;
 
 	nvmet_rdma_release_rsp(rsp);
 
@@ -536,7 +537,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
 	if (unlikely(wc->status != IB_WC_SUCCESS &&
 		     wc->status != IB_WC_WR_FLUSH_ERR)) {
 		pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
 			wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
-		nvmet_rdma_error_comp(rsp->queue);
+		nvmet_rdma_error_comp(queue);
 	}
 }