@@ -1256,10 +1256,21 @@ static bool tunnel_offload_supported(struct mlx5_core_dev *dev)
 		MLX5_CAP_ETH(dev, tunnel_stateless_geneve_rx));
 }
 
+static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
+				      struct mlx5_ib_rq *rq,
+				      u32 qp_flags_en)
+{
+	if (qp_flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+			   MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
+		mlx5_ib_disable_lb(dev, false, true);
+	mlx5_core_destroy_tir(dev->mdev, rq->tirn);
+}
+
 static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
 				    struct mlx5_ib_rq *rq, u32 tdn,
-				    bool tunnel_offload_en)
+				    u32 *qp_flags_en)
 {
+	u8 lb_flag = 0;
 	u32 *in;
 	void *tirc;
 	int inlen;
@@ -1274,26 +1285,35 @@ static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
 	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
 	MLX5_SET(tirc, tirc, inline_rqn, rq->base.mqp.qpn);
 	MLX5_SET(tirc, tirc, transport_domain, tdn);
-	if (tunnel_offload_en)
+	if (*qp_flags_en & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
 		MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
 
-	if (dev->rep)
-		MLX5_SET(tirc, tirc, self_lb_block,
-			 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_);
+	if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC)
+		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+
+	if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)
+		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
+
+	if (dev->rep) {
+		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+		*qp_flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
+	}
+
+	MLX5_SET(tirc, tirc, self_lb_block, lb_flag);
 
 	err = mlx5_core_create_tir(dev->mdev, in, inlen, &rq->tirn);
 
+	if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
+		err = mlx5_ib_enable_lb(dev, false, true);
+
+		if (err)
+			destroy_raw_packet_qp_tir(dev, rq, 0);
+	}
 	kvfree(in);
 
 	return err;
 }
 
-static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
-				      struct mlx5_ib_rq *rq)
-{
-	mlx5_core_destroy_tir(dev->mdev, rq->tirn);
-}
-
 static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 				u32 *in, size_t inlen,
 				struct ib_pd *pd)
@@ -1332,8 +1352,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 			goto err_destroy_sq;
 
 
-		err = create_raw_packet_qp_tir(dev, rq, tdn,
-					       qp->tunnel_offload_en);
+		err = create_raw_packet_qp_tir(dev, rq, tdn, &qp->flags_en);
 		if (err)
 			goto err_destroy_rq;
 	}
@@ -1363,7 +1382,7 @@ static void destroy_raw_packet_qp(struct mlx5_ib_dev *dev,
 	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
 
 	if (qp->rq.wqe_cnt) {
-		destroy_raw_packet_qp_tir(dev, rq);
+		destroy_raw_packet_qp_tir(dev, rq, qp->flags_en);
 		destroy_raw_packet_qp_rq(dev, rq);
 	}
 
@@ -1387,6 +1406,9 @@ static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp,
 
 static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
 {
+	if (qp->flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+			    MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
+		mlx5_ib_disable_lb(dev, false, true);
 	mlx5_core_destroy_tir(dev->mdev, qp->rss_qp.tirn);
 }
 
@@ -1410,6 +1432,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	u32 tdn = mucontext->tdn;
 	struct mlx5_ib_create_qp_rss ucmd = {};
 	size_t required_cmd_sz;
+	u8 lb_flag = 0;
 
 	if (init_attr->qp_type != IB_QPT_RAW_PACKET)
 		return -EOPNOTSUPP;
@@ -1444,7 +1467,9 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 		return -EOPNOTSUPP;
 	}
 
-	if (ucmd.flags & ~MLX5_QP_FLAG_TUNNEL_OFFLOADS) {
+	if (ucmd.flags & ~(MLX5_QP_FLAG_TUNNEL_OFFLOADS |
+			   MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+			   MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)) {
 		mlx5_ib_dbg(dev, "invalid flags\n");
 		return -EOPNOTSUPP;
 	}
@@ -1461,6 +1486,16 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 		return -EOPNOTSUPP;
 	}
 
+	if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC || dev->rep) {
+		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+		qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
+	}
+
+	if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) {
+		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
+		qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC;
+	}
+
 	err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
 	if (err) {
 		mlx5_ib_dbg(dev, "copy failed\n");
@@ -1484,6 +1519,8 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
 		MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
 
+	MLX5_SET(tirc, tirc, self_lb_block, lb_flag);
+
 	if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER)
 		hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner);
 	else
@@ -1580,12 +1617,15 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields);
 
 create_tir:
-	if (dev->rep)
-		MLX5_SET(tirc, tirc, self_lb_block,
-			 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_);
-
 	err = mlx5_core_create_tir(dev->mdev, in, inlen, &qp->rss_qp.tirn);
 
+	if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
+		err = mlx5_ib_enable_lb(dev, false, true);
+
+		if (err)
+			mlx5_core_destroy_tir(dev->mdev, qp->rss_qp.tirn);
+	}
+
 	if (err)
 		goto err;
 
@@ -1710,7 +1750,23 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 			mlx5_ib_dbg(dev, "Tunnel offload isn't supported\n");
 			return -EOPNOTSUPP;
 		}
-		qp->tunnel_offload_en = true;
+		qp->flags_en |= MLX5_QP_FLAG_TUNNEL_OFFLOADS;
+	}
+
+	if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC) {
+		if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
+			mlx5_ib_dbg(dev, "Self-LB UC isn't supported\n");
+			return -EOPNOTSUPP;
+		}
+		qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
+	}
+
+	if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) {
+		if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
+			mlx5_ib_dbg(dev, "Self-LB UM isn't supported\n");
+			return -EOPNOTSUPP;
+		}
+		qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC;
 	}
 
 	if (init_attr->create_flags & IB_QP_CREATE_SOURCE_QPN) {