@@ -295,6 +295,12 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
 	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
 	case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
+	case MLX5_CMD_OP_2ERR_QP:
+	case MLX5_CMD_OP_2RST_QP:
+	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
+	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
+	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
 		return MLX5_CMD_STAT_OK;
 
 	case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -321,8 +327,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_RTR2RTS_QP:
 	case MLX5_CMD_OP_RTS2RTS_QP:
 	case MLX5_CMD_OP_SQERR2RTS_QP:
-	case MLX5_CMD_OP_2ERR_QP:
-	case MLX5_CMD_OP_2RST_QP:
 	case MLX5_CMD_OP_QUERY_QP:
 	case MLX5_CMD_OP_SQD_RTS_QP:
 	case MLX5_CMD_OP_INIT2INIT_QP:
@@ -342,7 +346,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
 	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
 	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
-	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
 	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
 	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
 	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
@@ -390,11 +393,12 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_CREATE_RQT:
 	case MLX5_CMD_OP_MODIFY_RQT:
 	case MLX5_CMD_OP_QUERY_RQT:
+
 	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
 	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
 	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
 	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
-	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+
 	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
 	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
 	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
@@ -602,11 +606,36 @@ static void dump_command(struct mlx5_core_dev *dev,
 		pr_debug("\n");
 }
 
+static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
+{
+	struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
+
+	return be16_to_cpu(hdr->opcode);
+}
+
+static void cb_timeout_handler(struct work_struct *work)
+{
+	struct delayed_work *dwork = container_of(work, struct delayed_work,
+						  work);
+	struct mlx5_cmd_work_ent *ent = container_of(dwork,
+						     struct mlx5_cmd_work_ent,
+						     cb_timeout_work);
+	struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
+						 cmd);
+
+	ent->ret = -ETIMEDOUT;
+	mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
+		       mlx5_command_str(msg_to_opcode(ent->in)),
+		       msg_to_opcode(ent->in));
+	mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+}
+
 static void cmd_work_handler(struct work_struct *work)
 {
 	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
 	struct mlx5_cmd *cmd = ent->cmd;
 	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
+	unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
 	struct mlx5_cmd_layout *lay;
 	struct semaphore *sem;
 	unsigned long flags;
@@ -647,6 +676,9 @@ static void cmd_work_handler(struct work_struct *work)
 	dump_command(dev, ent, 1);
 	ent->ts1 = ktime_get_ns();
 
+	if (ent->callback)
+		schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
+
 	/* ring doorbell after the descriptor is valid */
 	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
 	wmb();
@@ -691,13 +723,6 @@ static const char *deliv_status_to_str(u8 status)
 	}
 }
 
-static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
-{
-	struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
-
-	return be16_to_cpu(hdr->opcode);
-}
-
 static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
 {
 	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
@@ -706,13 +731,13 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
 
 	if (cmd->mode == CMD_MODE_POLLING) {
 		wait_for_completion(&ent->done);
-		err = ent->ret;
-	} else {
-		if (!wait_for_completion_timeout(&ent->done, timeout))
-			err = -ETIMEDOUT;
-		else
-			err = 0;
+	} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
+		ent->ret = -ETIMEDOUT;
+		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
 	}
+
+	err = ent->ret;
+
 	if (err == -ETIMEDOUT) {
 		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
 			       mlx5_command_str(msg_to_opcode(ent->in)),
@@ -761,6 +786,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
 	if (!callback)
 		init_completion(&ent->done);
 
+	INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
 	INIT_WORK(&ent->work, cmd_work_handler);
 	if (page_queue) {
 		cmd_work_handler(&ent->work);
@@ -770,28 +796,26 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
 		goto out_free;
 	}
 
-	if (!callback) {
-		err = wait_func(dev, ent);
-		if (err == -ETIMEDOUT)
-			goto out;
-
-		ds = ent->ts2 - ent->ts1;
-		op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
-		if (op < ARRAY_SIZE(cmd->stats)) {
-			stats = &cmd->stats[op];
-			spin_lock_irq(&stats->lock);
-			stats->sum += ds;
-			++stats->n;
-			spin_unlock_irq(&stats->lock);
-		}
-		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
-				   "fw exec time for %s is %lld nsec\n",
-				   mlx5_command_str(op), ds);
-		*status = ent->status;
-		free_cmd(ent);
-	}
+	if (callback)
+		goto out;
 
-	return err;
+	err = wait_func(dev, ent);
+	if (err == -ETIMEDOUT)
+		goto out_free;
+
+	ds = ent->ts2 - ent->ts1;
+	op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
+	if (op < ARRAY_SIZE(cmd->stats)) {
+		stats = &cmd->stats[op];
+		spin_lock_irq(&stats->lock);
+		stats->sum += ds;
+		++stats->n;
+		spin_unlock_irq(&stats->lock);
+	}
+	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
+			   "fw exec time for %s is %lld nsec\n",
+			   mlx5_command_str(op), ds);
+	*status = ent->status;
 
 out_free:
 	free_cmd(ent);
@@ -1181,41 +1205,30 @@ err_dbg:
 	return err;
 }
 
-void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
+static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
 {
 	struct mlx5_cmd *cmd = &dev->cmd;
 	int i;
 
 	for (i = 0; i < cmd->max_reg_cmds; i++)
 		down(&cmd->sem);
-
 	down(&cmd->pages_sem);
 
-	flush_workqueue(cmd->wq);
-
-	cmd->mode = CMD_MODE_EVENTS;
+	cmd->mode = mode;
 
 	up(&cmd->pages_sem);
 	for (i = 0; i < cmd->max_reg_cmds; i++)
 		up(&cmd->sem);
 }
 
-void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
+void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
 {
-	struct mlx5_cmd *cmd = &dev->cmd;
-	int i;
-
-	for (i = 0; i < cmd->max_reg_cmds; i++)
-		down(&cmd->sem);
-
-	down(&cmd->pages_sem);
-
-	flush_workqueue(cmd->wq);
-	cmd->mode = CMD_MODE_POLLING;
+	mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
+}
 
-	up(&cmd->pages_sem);
-	for (i = 0; i < cmd->max_reg_cmds; i++)
-		up(&cmd->sem);
+void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
+{
+	mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
 }
 
 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
@@ -1251,6 +1264,8 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
 			struct semaphore *sem;
 
 			ent = cmd->ent_arr[i];
+			if (ent->callback)
+				cancel_delayed_work(&ent->cb_timeout_work);
 			if (ent->page_queue)
 				sem = &cmd->pages_sem;
 			else