@@ -1053,7 +1053,8 @@ static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
 
 static int is_connected(enum ib_qp_type qp_type)
 {
-	if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
+	if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC ||
+	    qp_type == MLX5_IB_QPT_DCI)
 		return 1;
 
 	return 0;
@@ -1684,6 +1685,49 @@ err:
 	return err;
 }
 
+static void configure_responder_scat_cqe(struct ib_qp_init_attr *init_attr,
+					 void *qpc)
+{
+	int rcqe_sz;
+
+	if (init_attr->qp_type == MLX5_IB_QPT_DCI)
+		return;
+
+	rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq);
+
+	if (rcqe_sz == 128) {
+		MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
+		return;
+	}
+
+	if (init_attr->qp_type != MLX5_IB_QPT_DCT)
+		MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE);
+}
+
+static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
+					 struct ib_qp_init_attr *init_attr,
+					 void *qpc)
+{
+	enum ib_qp_type qpt = init_attr->qp_type;
+	int scqe_sz;
+
+	if (qpt == IB_QPT_UC || qpt == IB_QPT_UD)
+		return;
+
+	if (init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
+		return;
+
+	scqe_sz = mlx5_ib_get_cqe_size(init_attr->send_cq);
+	if (scqe_sz == 128) {
+		MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE);
+		return;
+	}
+
+	if (init_attr->qp_type != MLX5_IB_QPT_DCI ||
+	    MLX5_CAP_GEN(dev->mdev, dc_req_scat_data_cqe))
+		MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE);
+}
+
 static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 			    struct ib_qp_init_attr *init_attr,
 			    struct ib_udata *udata, struct mlx5_ib_qp *qp)
@@ -1787,7 +1831,8 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 			return err;
 
 		qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
-		qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
+		if (MLX5_CAP_GEN(dev->mdev, sctr_data_cqe))
+			qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
 		if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS) {
 			if (init_attr->qp_type != IB_QPT_RAW_PACKET ||
 			    !tunnel_offload_supported(mdev)) {
@@ -1911,23 +1956,8 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 		MLX5_SET(qpc, qpc, cd_slave_receive, 1);
 
 	if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
-		int rcqe_sz;
-		int scqe_sz;
-
-		rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
-		scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);
-
-		if (rcqe_sz == 128)
-			MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
-		else
-			MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE);
-
-		if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
-			if (scqe_sz == 128)
-				MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE);
-			else
-				MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE);
-		}
+		configure_responder_scat_cqe(init_attr, qpc);
+		configure_requester_scat_cqe(dev, init_attr, qpc);
 	}
 
 	if (qp->rq.wqe_cnt) {
@@ -2302,6 +2332,9 @@ static struct ib_qp *mlx5_ib_create_dct(struct ib_pd *pd,
 	MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
 	MLX5_SET(dctc, dctc, user_index, uidx);
 
+	if (ucmd->flags & MLX5_QP_FLAG_SCATTER_CQE)
+		configure_responder_scat_cqe(attr, dctc);
+
 	qp->state = IB_QPS_RESET;
 
 	return &qp->ibqp;