@@ -86,7 +86,9 @@ struct mlx5_modify_raw_qp_param {
 	u16 operation;
 
 	u32 set_mask; /* raw_qp_set_mask_map */
-	u32 rate_limit;
+
+	struct mlx5_rate_limit rl;
+
 	u8 rq_q_ctr_id;
 };
 
@@ -2774,8 +2776,9 @@ static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
 				   const struct mlx5_modify_raw_qp_param *raw_qp_param)
 {
 	struct mlx5_ib_qp *ibqp = sq->base.container_mibqp;
-	u32 old_rate = ibqp->rate_limit;
-	u32 new_rate = old_rate;
+	struct mlx5_rate_limit old_rl = ibqp->rl;
+	struct mlx5_rate_limit new_rl = old_rl;
+	bool new_rate_added = false;
 	u16 rl_index = 0;
 	void *in;
 	void *sqc;
@@ -2797,39 +2800,43 @@ static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
 			pr_warn("%s: Rate limit can only be changed when SQ is moving to RDY\n",
 				__func__);
 		else
-			new_rate = raw_qp_param->rate_limit;
+			new_rl = raw_qp_param->rl;
 	}
 
-	if (old_rate != new_rate) {
-		if (new_rate) {
-			err = mlx5_rl_add_rate(dev, new_rate, &rl_index);
+	if (!mlx5_rl_are_equal(&old_rl, &new_rl)) {
+		if (new_rl.rate) {
+			err = mlx5_rl_add_rate(dev, &rl_index, &new_rl);
 			if (err) {
-				pr_err("Failed configuring rate %u: %d\n",
-				       new_rate, err);
+				pr_err("Failed configuring rate limit(err %d): \
+				       rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
+				       err, new_rl.rate, new_rl.max_burst_sz,
+				       new_rl.typical_pkt_sz);
+
 				goto out;
 			}
+			new_rate_added = true;
 		}
 
 		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
+		/* index 0 means no limit */
 		MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
 	}
 
 	err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen);
 	if (err) {
 		/* Remove new rate from table if failed */
-		if (new_rate &&
-		    old_rate != new_rate)
-			mlx5_rl_remove_rate(dev, new_rate);
+		if (new_rate_added)
+			mlx5_rl_remove_rate(dev, &new_rl);
 		goto out;
 	}
 
 	/* Only remove the old rate after new rate was set */
-	if ((old_rate &&
-	    (old_rate != new_rate)) ||
+	if ((old_rl.rate &&
+	    !mlx5_rl_are_equal(&old_rl, &new_rl)) ||
 	    (new_state != MLX5_SQC_STATE_RDY))
-		mlx5_rl_remove_rate(dev, old_rate);
+		mlx5_rl_remove_rate(dev, &old_rl);
 
-	ibqp->rate_limit = new_rate;
+	ibqp->rl = new_rl;
 	sq->state = new_state;
 
 out:
@@ -2906,7 +2913,8 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 
 static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 			       const struct ib_qp_attr *attr, int attr_mask,
-			       enum ib_qp_state cur_state, enum ib_qp_state new_state)
+			       enum ib_qp_state cur_state, enum ib_qp_state new_state,
+			       const struct mlx5_ib_modify_qp *ucmd)
 {
 	static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
 		[MLX5_QP_STATE_RST] = {
@@ -3144,7 +3152,30 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 		}
 
 		if (attr_mask & IB_QP_RATE_LIMIT) {
-			raw_qp_param.rate_limit = attr->rate_limit;
+			raw_qp_param.rl.rate = attr->rate_limit;
+
+			if (ucmd->burst_info.max_burst_sz) {
+				if (attr->rate_limit &&
+				    MLX5_CAP_QOS(dev->mdev, packet_pacing_burst_bound)) {
+					raw_qp_param.rl.max_burst_sz =
+						ucmd->burst_info.max_burst_sz;
+				} else {
+					err = -EINVAL;
+					goto out;
+				}
+			}
+
+			if (ucmd->burst_info.typical_pkt_sz) {
+				if (attr->rate_limit &&
+				    MLX5_CAP_QOS(dev->mdev, packet_pacing_typical_size)) {
+					raw_qp_param.rl.typical_pkt_sz =
+						ucmd->burst_info.typical_pkt_sz;
+				} else {
+					err = -EINVAL;
+					goto out;
+				}
+			}
+
 			raw_qp_param.set_mask |= MLX5_RAW_QP_RATE_LIMIT;
 		}
 
@@ -3332,8 +3363,10 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
+	struct mlx5_ib_modify_qp ucmd = {};
 	enum ib_qp_type qp_type;
 	enum ib_qp_state cur_state, new_state;
+	size_t required_cmd_sz;
 	int err = -EINVAL;
 	int port;
 	enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;
@@ -3341,6 +3374,28 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	if (ibqp->rwq_ind_tbl)
 		return -ENOSYS;
 
+	if (udata && udata->inlen) {
+		required_cmd_sz = offsetof(typeof(ucmd), reserved) +
+			sizeof(ucmd.reserved);
+		if (udata->inlen < required_cmd_sz)
+			return -EINVAL;
+
+		if (udata->inlen > sizeof(ucmd) &&
+		    !ib_is_udata_cleared(udata, sizeof(ucmd),
+					 udata->inlen - sizeof(ucmd)))
+			return -EOPNOTSUPP;
+
+		if (ib_copy_from_udata(&ucmd, udata,
+				       min(udata->inlen, sizeof(ucmd))))
+			return -EFAULT;
+
+		if (ucmd.comp_mask ||
+		    memchr_inv(&ucmd.reserved, 0, sizeof(ucmd.reserved)) ||
+		    memchr_inv(&ucmd.burst_info.reserved, 0,
+			       sizeof(ucmd.burst_info.reserved)))
+			return -EOPNOTSUPP;
+	}
+
 	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
 		return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);
 
@@ -3421,7 +3476,8 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		goto out;
 	}
 
-	err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
+	err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state,
+				  new_state, &ucmd);
 
 out:
 	mutex_unlock(&qp->mutex);
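
For context, here is a minimal sketch of the modify-QP user command layout that the udata validation above assumes. It is reconstructed only from the members these hunks reference (comp_mask, burst_info.max_burst_sz, burst_info.typical_pkt_sz, and the two reserved fields); the exact member types and the authoritative definition live in the mlx5 uapi header, which is not part of the hunks shown here, so treat every detail below as an assumption.

#include <linux/types.h>

/*
 * Hypothetical reconstruction, not copied from this diff: a command layout
 * compatible with the checks in mlx5_ib_modify_qp(). "reserved" is assumed
 * to be the last member, since required_cmd_sz is computed as
 * offsetof(typeof(ucmd), reserved) + sizeof(ucmd.reserved).
 */
struct mlx5_ib_burst_info {
	__u32	max_burst_sz;	/* 0 means the field is not used */
	__u16	typical_pkt_sz;	/* 0 means the field is not used */
	__u16	reserved;	/* must be zero; checked via memchr_inv() */
};

struct mlx5_ib_modify_qp {
	__u32				comp_mask;	/* must be zero */
	struct mlx5_ib_burst_info	burst_info;
	__u32				reserved;	/* must be zero */
};

Non-zero burst_info fields are only accepted when a rate limit is being set and the corresponding QoS capability bit (packet_pacing_burst_bound or packet_pacing_typical_size) is reported by the device; otherwise __mlx5_ib_modify_qp() fails with -EINVAL, as the hunk at line 3152 shows.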