@@ -142,6 +142,7 @@ struct nvme_fc_ctrl {
 	struct nvme_fc_rport	*rport;
 	u32			cnum;
 
+	bool			ioq_live;
 	bool			assoc_active;
 	u64			association_id;
 
@@ -1470,21 +1471,6 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
 
 static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
 
-static int
-nvme_fc_reinit_request(void *data, struct request *rq)
-{
-	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
-	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
-
-	memset(cmdiu, 0, sizeof(*cmdiu));
-	cmdiu->scsi_id = NVME_CMD_SCSI_ID;
-	cmdiu->fc_id = NVME_CMD_FC_ID;
-	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
-	memset(&op->rsp_iu, 0, sizeof(op->rsp_iu));
-
-	return 0;
-}
-
 static void
 __nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
 		struct nvme_fc_fcp_op *op)
@@ -1893,6 +1879,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
 	 */
 
 	queue->connection_id = 0;
+	atomic_set(&queue->csn, 1);
 }
 
 static void
@@ -2279,14 +2266,13 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
 	struct nvme_command *sqe = &cmdiu->sqe;
 	enum nvmefc_fcp_datadir	io_dir;
+	bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
 	u32 data_len;
 	blk_status_t ret;
 
-	ret = nvmf_check_if_ready(&queue->ctrl->ctrl, rq,
-		test_bit(NVME_FC_Q_LIVE, &queue->flags),
-		ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE);
-	if (unlikely(ret))
-		return ret;
+	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
+	    !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
+		return nvmf_fail_nonready_command(rq);
 
 	ret = nvme_setup_cmd(ns, rq, sqe);
 	if (ret)
@@ -2463,6 +2449,8 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
 	if (ret)
 		goto out_delete_hw_queues;
 
+	ctrl->ioq_live = true;
+
 	return 0;
 
 out_delete_hw_queues:
@@ -2480,7 +2468,7 @@ out_free_tag_set:
 }
 
 static int
-nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
+nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
 {
 	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
 	unsigned int nr_io_queues;
@@ -2500,12 +2488,6 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
 	if (ctrl->ctrl.queue_count == 1)
 		return 0;
 
-	nvme_fc_init_io_queues(ctrl);
-
-	ret = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
-	if (ret)
-		goto out_free_io_queues;
-
 	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
 		goto out_free_io_queues;
@@ -2603,8 +2585,6 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	 * Create the admin queue
 	 */
 
-	nvme_fc_init_queue(ctrl, 0);
-
 	ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
 				NVME_AQ_DEPTH);
 	if (ret)
@@ -2615,8 +2595,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	if (ret)
 		goto out_delete_hw_queue;
 
-	if (ctrl->ctrl.state != NVME_CTRL_NEW)
-		blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 
 	ret = nvmf_connect_admin_queue(&ctrl->ctrl);
 	if (ret)
@@ -2689,10 +2668,10 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	 */
 
 	if (ctrl->ctrl.queue_count > 1) {
-		if (ctrl->ctrl.state == NVME_CTRL_NEW)
+		if (!ctrl->ioq_live)
 			ret = nvme_fc_create_io_queues(ctrl);
 		else
-			ret = nvme_fc_reinit_io_queues(ctrl);
+			ret = nvme_fc_recreate_io_queues(ctrl);
 		if (ret)
 			goto out_term_aen_ops;
 	}
@@ -2776,8 +2755,7 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
 	 * use blk_mq_tagset_busy_itr() and the transport routine to
 	 * terminate the exchanges.
 	 */
-	if (ctrl->ctrl.state != NVME_CTRL_NEW)
-		blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
 	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
 				nvme_fc_terminate_exchange, &ctrl->ctrl);
 
@@ -2917,7 +2895,6 @@ static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
 	.submit_async_event	= nvme_fc_submit_async_event,
 	.delete_ctrl		= nvme_fc_delete_ctrl,
 	.get_address		= nvmf_get_address,
-	.reinit_request		= nvme_fc_reinit_request,
 };
 
 static void
@@ -2934,7 +2911,7 @@ nvme_fc_connect_ctrl_work(struct work_struct *work)
 		nvme_fc_reconnect_or_delete(ctrl, ret);
 	else
 		dev_info(ctrl->ctrl.device,
-			"NVME-FC{%d}: controller reconnect complete\n",
+			"NVME-FC{%d}: controller connect complete\n",
 			ctrl->cnum);
 }
 
@@ -2982,7 +2959,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 {
 	struct nvme_fc_ctrl *ctrl;
 	unsigned long flags;
-	int ret, idx, retry;
+	int ret, idx;
 
 	if (!(rport->remoteport.port_role &
 	    (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
@@ -3009,11 +2986,13 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	}
 
 	ctrl->ctrl.opts = opts;
+	ctrl->ctrl.nr_reconnects = 0;
 	INIT_LIST_HEAD(&ctrl->ctrl_list);
 	ctrl->lport = lport;
 	ctrl->rport = rport;
 	ctrl->dev = lport->dev;
 	ctrl->cnum = idx;
+	ctrl->ioq_live = false;
 	ctrl->assoc_active = false;
 	init_waitqueue_head(&ctrl->ioabort_wait);
 
@@ -3032,6 +3011,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
 	ctrl->ctrl.sqsize = opts->queue_size - 1;
 	ctrl->ctrl.kato = opts->kato;
+	ctrl->ctrl.cntlid = 0xffff;
 
 	ret = -ENOMEM;
 	ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
@@ -3039,6 +3019,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	if (!ctrl->queues)
 		goto out_free_ida;
 
+	nvme_fc_init_queue(ctrl, 0);
+
 	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
 	ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
 	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
@@ -3081,62 +3063,24 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
 	spin_unlock_irqrestore(&rport->lock, flags);
 
-	/*
-	 * It's possible that transactions used to create the association
-	 * may fail. Examples: CreateAssociation LS or CreateIOConnection
-	 * LS gets dropped/corrupted/fails; or a frame gets dropped or a
-	 * command times out for one of the actions to init the controller
-	 * (Connect, Get/Set_Property, Set_Features, etc). Many of these
-	 * transport errors (frame drop, LS failure) inherently must kill
-	 * the association. The transport is coded so that any command used
-	 * to create the association (prior to a LIVE state transition
-	 * while NEW or CONNECTING) will fail if it completes in error or
-	 * times out.
-	 *
-	 * As such: as the connect request was mostly likely due to a
-	 * udev event that discovered the remote port, meaning there is
-	 * not an admin or script there to restart if the connect
-	 * request fails, retry the initial connection creation up to
-	 * three times before giving up and declaring failure.
-	 */
-	for (retry = 0; retry < 3; retry++) {
-		ret = nvme_fc_create_association(ctrl);
-		if (!ret)
-			break;
-	}
-
-	if (ret) {
-		nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
-		cancel_work_sync(&ctrl->ctrl.reset_work);
-		cancel_delayed_work_sync(&ctrl->connect_work);
-
-		/* couldn't schedule retry - fail out */
+	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
+	    !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
 		dev_err(ctrl->ctrl.device,
-			"NVME-FC{%d}: Connect retry failed\n", ctrl->cnum);
-
-		ctrl->ctrl.opts = NULL;
+			"NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
+		goto fail_ctrl;
+	}
 
-		/* initiate nvme ctrl ref counting teardown */
-		nvme_uninit_ctrl(&ctrl->ctrl);
+	nvme_get_ctrl(&ctrl->ctrl);
 
-		/* Remove core ctrl ref. */
+	if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
 		nvme_put_ctrl(&ctrl->ctrl);
-
-		/* as we're past the point where we transition to the ref
-		 * counting teardown path, if we return a bad pointer here,
-		 * the calling routine, thinking it's prior to the
-		 * transition, will do an rport put. Since the teardown
-		 * path also does a rport put, we do an extra get here to
-		 * so proper order/teardown happens.
-		 */
-		nvme_fc_rport_get(rport);
-
-		if (ret > 0)
-			ret = -EIO;
-		return ERR_PTR(ret);
+		dev_err(ctrl->ctrl.device,
+			"NVME-FC{%d}: failed to schedule initial connect\n",
+			ctrl->cnum);
+		goto fail_ctrl;
 	}
 
-	nvme_get_ctrl(&ctrl->ctrl);
+	flush_delayed_work(&ctrl->connect_work);
 
 	dev_info(ctrl->ctrl.device,
 		"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
@@ -3144,6 +3088,30 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
 	return &ctrl->ctrl;
 
+fail_ctrl:
+	nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
+	cancel_work_sync(&ctrl->ctrl.reset_work);
+	cancel_delayed_work_sync(&ctrl->connect_work);
+
+	ctrl->ctrl.opts = NULL;
+
+	/* initiate nvme ctrl ref counting teardown */
+	nvme_uninit_ctrl(&ctrl->ctrl);
+
+	/* Remove core ctrl ref. */
+	nvme_put_ctrl(&ctrl->ctrl);
+
+	/* as we're past the point where we transition to the ref
+	 * counting teardown path, if we return a bad pointer here,
+	 * the calling routine, thinking it's prior to the
+	 * transition, will do an rport put. Since the teardown
+	 * path also does a rport put, we do an extra get here to
+	 * so proper order/teardown happens.
+	 */
+	nvme_fc_rport_get(rport);
+
+	return ERR_PTR(-EIO);
+
 out_cleanup_admin_q:
 	blk_cleanup_queue(ctrl->ctrl.admin_q);
 out_free_admin_tag_set: