@@ -1657,3 +1657,167 @@ next_page:
 	return i;
 }
 EXPORT_SYMBOL(ib_sg_to_pages);
+
+struct ib_drain_cqe {
+	struct ib_cqe cqe;
+	struct completion done;
+};
+
+static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
+						cqe);
+
+	complete(&cqe->done);
+}
+
+/*
+ * Post a WR and block until its completion is reaped for the SQ.
+ */
+static void __ib_drain_sq(struct ib_qp *qp)
+{
+	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+	struct ib_drain_cqe sdrain;
+	struct ib_send_wr swr = {}, *bad_swr;
+	int ret;
+
+	if (qp->send_cq->poll_ctx == IB_POLL_DIRECT) {
+		WARN_ONCE(qp->send_cq->poll_ctx == IB_POLL_DIRECT,
+			  "IB_POLL_DIRECT poll_ctx not supported for drain\n");
+		return;
+	}
+
+	swr.wr_cqe = &sdrain.cqe;
+	sdrain.cqe.done = ib_drain_qp_done;
+	init_completion(&sdrain.done);
+
+	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+	if (ret) {
+		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+		return;
+	}
+
+	ret = ib_post_send(qp, &swr, &bad_swr);
+	if (ret) {
+		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+		return;
+	}
+
+	wait_for_completion(&sdrain.done);
+}
+
+/*
+ * Post a WR and block until its completion is reaped for the RQ.
+ */
+static void __ib_drain_rq(struct ib_qp *qp)
+{
+	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+	struct ib_drain_cqe rdrain;
+	struct ib_recv_wr rwr = {}, *bad_rwr;
+	int ret;
+
+	if (qp->recv_cq->poll_ctx == IB_POLL_DIRECT) {
+		WARN_ONCE(qp->recv_cq->poll_ctx == IB_POLL_DIRECT,
+			  "IB_POLL_DIRECT poll_ctx not supported for drain\n");
+		return;
+	}
+
+	rwr.wr_cqe = &rdrain.cqe;
+	rdrain.cqe.done = ib_drain_qp_done;
+	init_completion(&rdrain.done);
+
+	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+	if (ret) {
+		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+		return;
+	}
+
+	ret = ib_post_recv(qp, &rwr, &bad_rwr);
+	if (ret) {
+		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+		return;
+	}
+
+	wait_for_completion(&rdrain.done);
+}
+
+/**
+ * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
+ *		   application.
+ * @qp: queue pair to drain
+ *
+ * If the device has a provider-specific drain function, then
+ * call that. Otherwise call the generic drain function
+ * __ib_drain_sq().
+ *
+ * The caller must:
+ *
+ * ensure there is room in the CQ and SQ for the drain work request and
+ * completion.
+ *
+ * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
+ * IB_POLL_DIRECT.
+ *
+ * ensure that there are no other contexts that are posting WRs concurrently.
+ * Otherwise the drain is not guaranteed.
+ */
+void ib_drain_sq(struct ib_qp *qp)
+{
+	if (qp->device->drain_sq)
+		qp->device->drain_sq(qp);
+	else
+		__ib_drain_sq(qp);
+}
+EXPORT_SYMBOL(ib_drain_sq);
+
+/**
+ * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
+ *		   application.
+ * @qp: queue pair to drain
+ *
+ * If the device has a provider-specific drain function, then
+ * call that. Otherwise call the generic drain function
+ * __ib_drain_rq().
+ *
+ * The caller must:
+ *
+ * ensure there is room in the CQ and RQ for the drain work request and
+ * completion.
+ *
+ * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
+ * IB_POLL_DIRECT.
+ *
+ * ensure that there are no other contexts that are posting WRs concurrently.
+ * Otherwise the drain is not guaranteed.
+ */
+void ib_drain_rq(struct ib_qp *qp)
+{
+	if (qp->device->drain_rq)
+		qp->device->drain_rq(qp);
+	else
+		__ib_drain_rq(qp);
+}
+EXPORT_SYMBOL(ib_drain_rq);
+
+/**
+ * ib_drain_qp() - Block until all CQEs have been consumed by the
+ *		   application on both the RQ and SQ.
+ * @qp: queue pair to drain
+ *
+ * The caller must:
+ *
+ * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
+ * and completions.
+ *
+ * allocate the CQs using ib_alloc_cq() and the CQ poll context cannot be
+ * IB_POLL_DIRECT.
+ *
+ * ensure that there are no other contexts that are posting WRs concurrently.
+ * Otherwise the drain is not guaranteed.
+ */
+void ib_drain_qp(struct ib_qp *qp)
+{
+	ib_drain_sq(qp);
+	ib_drain_rq(qp);
+}
+EXPORT_SYMBOL(ib_drain_qp);
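
A minimal usage sketch (not part of the patch above): a consumer would typically call the new ib_drain_qp() in its connection-teardown path so that all posted WRs have been flushed and their CQEs reaped before the QP and CQ are freed. struct my_ulp_conn, its fields, and my_ulp_teardown() are hypothetical names introduced only for illustration; the sketch assumes the CQ was allocated with ib_alloc_cq() using a poll context other than IB_POLL_DIRECT, that the SQ, RQ, and CQ were sized with room for the drain WRs and completions, and that no other context is still posting WRs to this QP, as the kernel-doc above requires.

#include <rdma/ib_verbs.h>

/* Hypothetical per-connection state kept by a ULP. */
struct my_ulp_conn {
	struct ib_qp *qp;	/* QP whose SQ and RQ are to be drained */
	struct ib_cq *cq;	/* CQ from ib_alloc_cq(), not IB_POLL_DIRECT */
};

static void my_ulp_teardown(struct my_ulp_conn *conn)
{
	/*
	 * ib_drain_qp() either calls the provider's drain hooks or, via
	 * the generic path, moves the QP to the error state and blocks
	 * until the drain WRs posted to the SQ and RQ have completed,
	 * so all earlier CQEs for this QP have been handed back.
	 */
	ib_drain_qp(conn->qp);

	/* Now it is safe to release the resources. */
	ib_destroy_qp(conn->qp);
	ib_free_cq(conn->cq);
}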