@@ -54,6 +54,7 @@ enum {
 
 enum {
 	MLX5_IB_SQ_STRIDE = 6,
+	MLX5_IB_SQ_UMR_INLINE_THRESHOLD = 64,
 };
 
 static const u32 mlx5_ib_opcode[] = {
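
Note: MLX5_IB_SQ_UMR_INLINE_THRESHOLD is the cutoff, in bytes, below which
the MR's translation-entry list is copied directly into the send WQE rather
than referenced through a data segment. A standalone sketch of that decision
(plain C with illustrative names, not driver code):

	#include <stdbool.h>
	#include <stdio.h>

	#define SQ_UMR_INLINE_THRESHOLD 64 /* mirrors MLX5_IB_SQ_UMR_INLINE_THRESHOLD */

	/* Inline only if the whole descriptor list fits under the threshold. */
	static bool use_inline_umr(int ndescs, int desc_size)
	{
		return ndescs * desc_size <= SQ_UMR_INLINE_THRESHOLD;
	}

	int main(void)
	{
		printf("%d\n", use_inline_umr(4, 8));   /* 32 bytes  -> 1 (inline)  */
		printf("%d\n", use_inline_umr(16, 16)); /* 256 bytes -> 0 (pointer) */
		return 0;
	}
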
@@ -298,7 +299,9 @@ static int sq_overhead(struct ib_qp_init_attr *attr)
 		max(sizeof(struct mlx5_wqe_atomic_seg) +
 		    sizeof(struct mlx5_wqe_raddr_seg),
 		    sizeof(struct mlx5_wqe_umr_ctrl_seg) +
-		    sizeof(struct mlx5_mkey_seg));
+		    sizeof(struct mlx5_mkey_seg) +
+		    MLX5_IB_SQ_UMR_INLINE_THRESHOLD /
+		    MLX5_IB_UMR_OCTOWORD);
 		break;
 
 	case IB_QPT_XRC_TGT:
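
Note: with MLX5_IB_UMR_OCTOWORD defined as 16 in mlx5_ib.h, the new term
grows the UMR arm of the max() by 64 / 16 = 4 bytes. A worked example of
the before/after overhead; the segment sizes below are hypothetical
stand-ins, the real values come from sizeof on the mlx5 structs:

	#include <stdio.h>

	#define SQ_UMR_INLINE_THRESHOLD 64
	#define UMR_OCTOWORD            16

	int main(void)
	{
		int umr_ctrl_seg = 48; /* hypothetical stand-in size */
		int mkey_seg = 64;     /* hypothetical stand-in size */

		int before = umr_ctrl_seg + mkey_seg;
		int after = before + SQ_UMR_INLINE_THRESHOLD / UMR_OCTOWORD;

		printf("UMR overhead: %d -> %d bytes\n", before, after);
		return 0;
	}
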
@@ -3633,13 +3636,15 @@ static __be64 sig_mkey_mask(void)
 }
 
 static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
-			    struct mlx5_ib_mr *mr)
+			    struct mlx5_ib_mr *mr, bool umr_inline)
 {
 	int size = mr->ndescs * mr->desc_size;
 
 	memset(umr, 0, sizeof(*umr));
 
 	umr->flags = MLX5_UMR_CHECK_NOT_FREE;
+	if (umr_inline)
+		umr->flags |= MLX5_UMR_INLINE;
 	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
 	umr->mkey_mask = frwr_mkey_mask();
 }
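
Note: when the list is inlined, the UMR control segment additionally
carries MLX5_UMR_INLINE, telling the device the translation entries
follow inside the WQE instead of being fetched by pointer. A minimal
sketch of the flag composition; the bit values here are placeholders,
not the real MLX5_UMR_* encoding:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define UMR_CHECK_NOT_FREE (1u << 0) /* placeholder value */
	#define UMR_INLINE         (1u << 1) /* placeholder value */

	static uint8_t umr_flags(bool umr_inline)
	{
		uint8_t flags = UMR_CHECK_NOT_FREE;

		if (umr_inline)
			flags |= UMR_INLINE; /* entries follow in the WQE */
		return flags;
	}

	int main(void)
	{
		printf("%#x %#x\n", umr_flags(false), umr_flags(true));
		return 0;
	}
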
@@ -3823,6 +3828,24 @@ static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
 	dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
 }
 
+static void set_reg_umr_inline_seg(void *seg, struct mlx5_ib_qp *qp,
+				   struct mlx5_ib_mr *mr, int mr_list_size)
+{
+	void *qend = qp->sq.qend;
+	void *addr = mr->descs;
+	int copy;
+
+	if (unlikely(seg + mr_list_size > qend)) {
+		copy = qend - seg;
+		memcpy(seg, addr, copy);
+		addr += copy;
+		mr_list_size -= copy;
+		seg = mlx5_get_send_wqe(qp, 0);
+	}
+	memcpy(seg, addr, mr_list_size);
+	seg += mr_list_size;
+}
+
 static __be32 send_ieth(struct ib_send_wr *wr)
 {
 	switch (wr->opcode) {
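
Note: the send queue is a circular buffer, so set_reg_umr_inline_seg()
splits a list that would run past qp->sq.qend into two copies, the second
restarting at WQE index 0. A self-contained model of that wrap-around copy
(ring_copy and its names are illustrative, not driver API):

	#include <stdio.h>
	#include <string.h>

	static void ring_copy(char *ring, size_t ring_size, size_t offset,
			      const char *src, size_t len)
	{
		if (offset + len > ring_size) {       /* would cross the end */
			size_t copy = ring_size - offset;

			memcpy(ring + offset, src, copy);
			src += copy;
			len -= copy;
			offset = 0;                   /* wrap to the start */
		}
		memcpy(ring + offset, src, len);
	}

	int main(void)
	{
		char ring[8] = "........";

		ring_copy(ring, sizeof(ring), 6, "ABCD", 4); /* wraps after 2 bytes */
		printf("%.8s\n", ring); /* prints CD....AB */
		return 0;
	}
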
@@ -4217,6 +4240,8 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
 {
 	struct mlx5_ib_mr *mr = to_mmr(wr->mr);
 	struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
+	int mr_list_size = mr->ndescs * mr->desc_size;
+	bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
 
 	if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
 		mlx5_ib_warn(to_mdev(qp->ibqp.device),
@@ -4224,7 +4249,7 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
 		return -EINVAL;
 	}
 
-	set_reg_umr_seg(*seg, mr);
+	set_reg_umr_seg(*seg, mr, umr_inline);
 	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
 	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
 	if (unlikely((*seg == qp->sq.qend)))
@@ -4236,10 +4261,14 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
 	if (unlikely((*seg == qp->sq.qend)))
 		*seg = mlx5_get_send_wqe(qp, 0);
 
-	set_reg_data_seg(*seg, mr, pd);
-	*seg += sizeof(struct mlx5_wqe_data_seg);
-	*size += (sizeof(struct mlx5_wqe_data_seg) / 16);
-
+	if (umr_inline) {
+		set_reg_umr_inline_seg(*seg, qp, mr, mr_list_size);
+		*size += get_xlt_octo(mr_list_size);
+	} else {
+		set_reg_data_seg(*seg, mr, pd);
+		*seg += sizeof(struct mlx5_wqe_data_seg);
+		*size += (sizeof(struct mlx5_wqe_data_seg) / 16);
+	}
 	return 0;
 }
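
Note: in the inline path, *size advances by get_xlt_octo(mr_list_size)
16-byte octowords instead of by one data segment, so the inlined entries
are accounted for in the WQE size. A hedged sketch of that accounting;
the round-up below is an assumption, not the verified get_xlt_octo()
from qp.c:

	#include <stdio.h>

	/* assumed: round the byte length up to whole 16-byte octowords */
	static int xlt_octo(int len)
	{
		return (len + 15) / 16;
	}

	int main(void)
	{
		printf("%d\n", xlt_octo(64)); /* 64-byte list -> 4 octowords */
		printf("%d\n", xlt_octo(24)); /* 24-byte list -> 2 octowords */
		return 0;
	}
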