@@ -64,13 +64,13 @@ struct nvme_fc_queue {
 struct nvmefc_ls_req_op {
 	struct nvmefc_ls_req	ls_req;
 
-	struct nvme_fc_ctrl	*ctrl;
+	struct nvme_fc_rport	*rport;
 	struct nvme_fc_queue	*queue;
 	struct request		*rq;
 
 	int			ls_error;
 	struct completion	ls_done;
-	struct list_head	lsreq_list;	/* ctrl->ls_req_list */
+	struct list_head	lsreq_list;	/* rport->ls_req_list */
 	bool			req_queued;
 };
 
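With the LS op anchored to the remoteport instead of the controller, an outstanding LS request no longer pins the ctrl; it pins the rport. When the LLDD completes the request it hands back only the embedded nvmefc_ls_req, and the transport recovers the op from it with a container_of() conversion. A minimal sketch of that helper (fc.c's ls_req_to_lsop; the exact spelling in fc.c may differ):

	static inline struct nvmefc_ls_req_op *
	ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
	{
		/* ls_req is embedded in the op, so walk back to the container */
		return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
	}
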
@@ -120,6 +120,9 @@ struct nvme_fc_rport {
 
 	struct list_head		endp_list; /* for lport->endp_list */
 	struct list_head		ctrl_list;
+	struct list_head		ls_req_list;
+	struct device			*dev;	/* physical device for dma */
+	struct nvme_fc_lport		*lport;
 	spinlock_t			lock;
 	struct kref			ref;
 } __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
@@ -144,7 +147,6 @@ struct nvme_fc_ctrl {
 	u64			cap;
 
 	struct list_head	ctrl_list;	/* rport->ctrl_list */
-	struct list_head	ls_req_list;
 
 	struct blk_mq_tag_set	admin_tag_set;
 	struct blk_mq_tag_set	tag_set;
@@ -419,9 +421,12 @@ nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
 
 	INIT_LIST_HEAD(&newrec->endp_list);
 	INIT_LIST_HEAD(&newrec->ctrl_list);
+	INIT_LIST_HEAD(&newrec->ls_req_list);
 	kref_init(&newrec->ref);
 	spin_lock_init(&newrec->lock);
 	newrec->remoteport.localport = &lport->localport;
+	newrec->dev = lport->dev;
+	newrec->lport = lport;
 	newrec->remoteport.private = &newrec[1];
 	newrec->remoteport.port_role = pinfo->port_role;
 	newrec->remoteport.node_name = pinfo->node_name;
@@ -444,7 +449,6 @@ out_kfree_rport:
 out_reghost_failed:
 	*portptr = NULL;
 	return ret;
-
 }
 EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
 
@@ -624,16 +628,16 @@ static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);
 
 
 static void
-__nvme_fc_finish_ls_req(struct nvme_fc_ctrl *ctrl,
-		struct nvmefc_ls_req_op *lsop)
+__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
 {
+	struct nvme_fc_rport *rport = lsop->rport;
 	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ctrl->lock, flags);
+	spin_lock_irqsave(&rport->lock, flags);
 
 	if (!lsop->req_queued) {
-		spin_unlock_irqrestore(&ctrl->lock, flags);
+		spin_unlock_irqrestore(&rport->lock, flags);
 		return;
 	}
 
@@ -641,56 +645,71 @@ __nvme_fc_finish_ls_req(struct nvme_fc_ctrl *ctrl,
 
 	lsop->req_queued = false;
 
-	spin_unlock_irqrestore(&ctrl->lock, flags);
+	spin_unlock_irqrestore(&rport->lock, flags);
 
-	fc_dma_unmap_single(ctrl->dev, lsreq->rqstdma,
+	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
 				  (lsreq->rqstlen + lsreq->rsplen),
 				  DMA_BIDIRECTIONAL);
 
-	nvme_fc_ctrl_put(ctrl);
+	nvme_fc_rport_put(rport);
 }
 
 static int
-__nvme_fc_send_ls_req(struct nvme_fc_ctrl *ctrl,
+__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
 		struct nvmefc_ls_req_op *lsop,
 		void (*done)(struct nvmefc_ls_req *req, int status))
 {
 	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
 	unsigned long flags;
-	int ret;
+	int ret = 0;
 
-	if (!nvme_fc_ctrl_get(ctrl))
+	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
+		return -ECONNREFUSED;
+
+	if (!nvme_fc_rport_get(rport))
 		return -ESHUTDOWN;
 
 	lsreq->done = done;
-	lsop->ctrl = ctrl;
+	lsop->rport = rport;
 	lsop->req_queued = false;
 	INIT_LIST_HEAD(&lsop->lsreq_list);
 	init_completion(&lsop->ls_done);
 
-	lsreq->rqstdma = fc_dma_map_single(ctrl->dev, lsreq->rqstaddr,
+	lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
 				  lsreq->rqstlen + lsreq->rsplen,
 				  DMA_BIDIRECTIONAL);
-	if (fc_dma_mapping_error(ctrl->dev, lsreq->rqstdma)) {
-		nvme_fc_ctrl_put(ctrl);
-		dev_err(ctrl->dev,
-			"els request command failed EFAULT.\n");
-		return -EFAULT;
+	if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
+		ret = -EFAULT;
+		goto out_putrport;
 	}
 	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;
 
-	spin_lock_irqsave(&ctrl->lock, flags);
+	spin_lock_irqsave(&rport->lock, flags);
 
-	list_add_tail(&lsop->lsreq_list, &ctrl->ls_req_list);
+	list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);
 
 	lsop->req_queued = true;
 
-	spin_unlock_irqrestore(&ctrl->lock, flags);
+	spin_unlock_irqrestore(&rport->lock, flags);
 
-	ret = ctrl->lport->ops->ls_req(&ctrl->lport->localport,
-					&ctrl->rport->remoteport, lsreq);
+	ret = rport->lport->ops->ls_req(&rport->lport->localport,
+					&rport->remoteport, lsreq);
 	if (ret)
-		lsop->ls_error = ret;
+		goto out_unlink;
+
+	return 0;
+
+out_unlink:
+	lsop->ls_error = ret;
+	spin_lock_irqsave(&rport->lock, flags);
+	lsop->req_queued = false;
+	list_del(&lsop->lsreq_list);
+	spin_unlock_irqrestore(&rport->lock, flags);
+	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
+				  (lsreq->rqstlen + lsreq->rsplen),
+				  DMA_BIDIRECTIONAL);
+out_putrport:
+	nvme_fc_rport_put(rport);
 
 	return ret;
 }
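The reworked __nvme_fc_send_ls_req() rejects sends to a remoteport that is not FC_OBJSTATE_ONLINE, and on any failure it unwinds fully: the op is unlinked from rport->ls_req_list, the DMA mapping is released, and the rport reference is dropped before the error is returned. A caller therefore only cleans up its own allocation. A rough caller-side sketch (my_ls_done, send_example_ls, and the buffer setup are invented for illustration):

	static void my_ls_done(struct nvmefc_ls_req *req, int status)
	{
		/* runs from the LLDD completion path on success */
	}

	static int send_example_ls(struct nvme_fc_rport *rport)
	{
		struct nvmefc_ls_req_op *lsop;
		int ret;

		lsop = kzalloc(sizeof(*lsop) +
				rport->lport->ops->lsrqst_priv_sz, GFP_KERNEL);
		if (!lsop)
			return -ENOMEM;
		/* ... point lsop->ls_req.rqstaddr/rspaddr at payload buffers,
		 * set rqstlen/rsplen, before sending ... */

		ret = __nvme_fc_send_ls_req(rport, lsop, my_ls_done);
		if (ret)
			kfree(lsop);	/* already unlinked, unmapped, rport ref dropped */
		return ret;
	}
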
@@ -705,15 +724,15 @@ nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
 }
 
 static int
-nvme_fc_send_ls_req(struct nvme_fc_ctrl *ctrl, struct nvmefc_ls_req_op *lsop)
+nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
 {
 	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
 	struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
 	int ret;
 
-	ret = __nvme_fc_send_ls_req(ctrl, lsop, nvme_fc_send_ls_req_done);
+	ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);
 
-	if (!ret)
+	if (!ret) {
 		/*
 		 * No timeout/not interruptible as we need the struct
 		 * to exist until the lldd calls us back. Thus mandate
|
|
|
*/
|
|
|
wait_for_completion(&lsop->ls_done);
|
|
|
|
|
|
- __nvme_fc_finish_ls_req(ctrl, lsop);
|
|
|
+ __nvme_fc_finish_ls_req(lsop);
|
|
|
|
|
|
- if (ret) {
|
|
|
- dev_err(ctrl->dev,
|
|
|
- "ls request command failed (%d).\n", ret);
|
|
|
- return ret;
|
|
|
+ ret = lsop->ls_error;
|
|
|
}
|
|
|
|
|
|
+ if (ret)
|
|
|
+ return ret;
|
|
|
+
|
|
|
/* ACC or RJT payload ? */
|
|
|
if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
|
|
|
return -ENXIO;
|
|
@@ -737,19 +756,14 @@ nvme_fc_send_ls_req(struct nvme_fc_ctrl *ctrl, struct nvmefc_ls_req_op *lsop)
 	return 0;
 }
 
-static void
-nvme_fc_send_ls_req_async(struct nvme_fc_ctrl *ctrl,
+static int
+nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
 		struct nvmefc_ls_req_op *lsop,
 		void (*done)(struct nvmefc_ls_req *req, int status))
 {
-	int ret;
-
-	ret = __nvme_fc_send_ls_req(ctrl, lsop, done);
-
 	/* don't wait for completion */
 
-	if (ret)
-		done(&lsop->ls_req, ret);
+	return __nvme_fc_send_ls_req(rport, lsop, done);
 }
 
 /* Validation Error indexes into the string table below */
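nvme_fc_send_ls_req_async() used to swallow a submission failure by calling done() itself, re-entering the completion handler from the submit context. Returning the error instead lets callers unwind in line; the resulting pattern, as the disconnect-association path below adopts it, is simply:

	ret = nvme_fc_send_ls_req_async(rport, lsop, done);
	if (ret)
		kfree(lsop);	/* done() is not invoked for a failed send */
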
@@ -839,7 +853,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
 	lsreq->rsplen = sizeof(*assoc_acc);
 	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
 
-	ret = nvme_fc_send_ls_req(ctrl, lsop);
+	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
 	if (ret)
 		goto out_free_buffer;
 
@@ -947,7 +961,7 @@ nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 	lsreq->rsplen = sizeof(*conn_acc);
 	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
 
-	ret = nvme_fc_send_ls_req(ctrl, lsop);
+	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
 	if (ret)
 		goto out_free_buffer;
 
@@ -998,14 +1012,8 @@ static void
 nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
 {
 	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
-	struct nvme_fc_ctrl *ctrl = lsop->ctrl;
-
-	__nvme_fc_finish_ls_req(ctrl, lsop);
 
-	if (status)
-		dev_err(ctrl->dev,
-			"disconnect assoc ls request command failed (%d).\n",
-			status);
+	__nvme_fc_finish_ls_req(lsop);
 
 	/* fc-nvme iniator doesn't care about success or failure of cmd */
 
@@ -1036,6 +1044,7 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
 	struct fcnvme_ls_disconnect_acc *discon_acc;
 	struct nvmefc_ls_req_op *lsop;
 	struct nvmefc_ls_req *lsreq;
+	int ret;
 
 	lsop = kzalloc((sizeof(*lsop) +
 			ctrl->lport->ops->lsrqst_priv_sz +
@@ -1078,7 +1087,10 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
 	lsreq->rsplen = sizeof(*discon_acc);
 	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
 
-	nvme_fc_send_ls_req_async(ctrl, lsop, nvme_fc_disconnect_assoc_done);
+	ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
+				nvme_fc_disconnect_assoc_done);
+	if (ret)
+		kfree(lsop);
 
 	/* only meaningful part to terminating the association */
 	ctrl->association_id = 0;
@@ -2302,7 +2314,6 @@ __nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
 	ctrl->ctrl.opts = opts;
 	INIT_LIST_HEAD(&ctrl->ctrl_list);
-	INIT_LIST_HEAD(&ctrl->ls_req_list);
 	ctrl->lport = lport;
 	ctrl->rport = rport;
 	ctrl->dev = lport->dev;
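For reference, the remoteport reference counting that the LS path now leans on is the usual kref pattern; the existing fc.c helpers look roughly like this (nvme_fc_free_rport is assumed to be the kref release callback; exact bodies may differ):

	static int
	nvme_fc_rport_get(struct nvme_fc_rport *rport)
	{
		return kref_get_unless_zero(&rport->ref);
	}

	static void
	nvme_fc_rport_put(struct nvme_fc_rport *rport)
	{
		kref_put(&rport->ref, nvme_fc_free_rport);
	}

Holding the rport rather than the ctrl across an LS request means a disconnect LS issued during controller teardown can complete safely after the ctrl itself is gone.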