@@ -204,6 +204,10 @@ struct fcloop_lport {
 	struct completion unreg_done;
 };
 
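+/*
+ * The localport private area now holds only a back-pointer; the lport
+ * itself is allocated separately so that it can outlive the localport
+ * and be freed once unregistration has completed.
+ */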
+struct fcloop_lport_priv {
+	struct fcloop_lport *lport;
+};
+
 struct fcloop_rport {
 	struct nvme_fc_remote_port *remoteport;
 	struct nvmet_fc_target_port *targetport;
@@ -238,21 +242,32 @@ struct fcloop_lsreq {
 	int status;
 };
 
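+/*
+ * Per-io initiator-side state, protected by tfcp_req->reqlock:
+ * START -> ACTIVE when the command is delivered to the target,
+ * START/ACTIVE -> ABORTED when the host aborts the io, and
+ * any state -> COMPLETED when the target releases the request.
+ */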
+enum {
+	INI_IO_START = 0,
+	INI_IO_ACTIVE = 1,
+	INI_IO_ABORTED = 2,
+	INI_IO_COMPLETED = 3,
+};
+
 struct fcloop_fcpreq {
 	struct fcloop_tport *tport;
 	struct nvmefc_fcp_req *fcpreq;
 	spinlock_t reqlock;
 	u16 status;
+	u32 inistate;
 	bool active;
 	bool aborted;
-	struct work_struct work;
+	struct kref ref;
+	struct work_struct fcp_rcv_work;
+	struct work_struct abort_rcv_work;
+	struct work_struct tio_done_work;
 	struct nvmefc_tgt_fcp_req tgt_fcp_req;
 };
 
 struct fcloop_ini_fcpreq {
 	struct nvmefc_fcp_req *fcpreq;
 	struct fcloop_fcpreq *tfcp_req;
-	struct work_struct iniwork;
+	spinlock_t inilock;
 };
 
 static inline struct fcloop_lsreq *
@@ -343,17 +358,122 @@ fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
 	return 0;
 }
 
-/*
- * FCP IO operation done by initiator abort.
- * call back up initiator "done" flows.
- */
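+/*
+ * Final kref release: both the initiator and target sides have dropped
+ * their references to the fcloop request, so it can be freed.
+ */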
 static void
-fcloop_tgt_fcprqst_ini_done_work(struct work_struct *work)
+fcloop_tfcp_req_free(struct kref *ref)
 {
-	struct fcloop_ini_fcpreq *inireq =
-		container_of(work, struct fcloop_ini_fcpreq, iniwork);
+	struct fcloop_fcpreq *tfcp_req =
+		container_of(ref, struct fcloop_fcpreq, ref);
+
+	kfree(tfcp_req);
+}
+
+static void
+fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
+{
+	kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
+}
+
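+/*
+ * Take a reference unless the final put has already run: returns
+ * non-zero on success, 0 if the request is already being freed.
+ */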
+static int
+fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
+{
+	return kref_get_unless_zero(&tfcp_req->ref);
+}
+
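+/*
+ * Complete the host-side io: sever the inireq->tfcp_req link under
+ * inilock, call the host done callback, then drop the reference the
+ * io held on the tgt struct.
+ */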
+static void
+fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
+			struct fcloop_fcpreq *tfcp_req, int status)
+{
+	struct fcloop_ini_fcpreq *inireq = NULL;
+
+	if (fcpreq) {
+		inireq = fcpreq->private;
+		spin_lock(&inireq->inilock);
+		inireq->tfcp_req = NULL;
+		spin_unlock(&inireq->inilock);
+
+		fcpreq->status = status;
+		fcpreq->done(fcpreq);
+	}
+
+	/* release original io reference on tgt struct */
+	fcloop_tfcp_req_put(tfcp_req);
+}
+
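+/*
+ * Work-item context: deliver the FCP command to the target, unless the
+ * host aborted the io while the work was queued.
+ */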
+static void
+fcloop_fcp_recv_work(struct work_struct *work)
+{
+	struct fcloop_fcpreq *tfcp_req =
+		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
+	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
+	int ret = 0;
+	bool aborted = false;
+
+	spin_lock(&tfcp_req->reqlock);
+	switch (tfcp_req->inistate) {
+	case INI_IO_START:
+		tfcp_req->inistate = INI_IO_ACTIVE;
+		break;
+	case INI_IO_ABORTED:
+		aborted = true;
+		break;
+	default:
+		spin_unlock(&tfcp_req->reqlock);
+		WARN_ON(1);
+		return;
+	}
+	spin_unlock(&tfcp_req->reqlock);
+
+	if (unlikely(aborted))
+		ret = -ECANCELED;
+	else
+		ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
+				&tfcp_req->tgt_fcp_req,
+				fcpreq->cmdaddr, fcpreq->cmdlen);
+	if (ret)
+		fcloop_call_host_done(fcpreq, tfcp_req, ret);
+
+	return;
+}
+
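+/*
+ * Work-item context: pass an abort to the target, unless the io
+ * completed while the abort work was queued, in which case only the
+ * abort's reference needs to be dropped.
+ */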
+static void
+fcloop_fcp_abort_recv_work(struct work_struct *work)
+{
+	struct fcloop_fcpreq *tfcp_req =
+		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
+	struct nvmefc_fcp_req *fcpreq;
+	bool completed = false;
+
+	spin_lock(&tfcp_req->reqlock);
+	fcpreq = tfcp_req->fcpreq;
+	switch (tfcp_req->inistate) {
+	case INI_IO_ABORTED:
+		break;
+	case INI_IO_COMPLETED:
+		completed = true;
+		break;
+	default:
+		spin_unlock(&tfcp_req->reqlock);
+		WARN_ON(1);
+		return;
+	}
+	spin_unlock(&tfcp_req->reqlock);
+
+	if (unlikely(completed)) {
+		/* remove reference taken in original abort downcall */
+		fcloop_tfcp_req_put(tfcp_req);
+		return;
+	}
 
-	inireq->fcpreq->done(inireq->fcpreq);
+	if (tfcp_req->tport->targetport)
+		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
+					&tfcp_req->tgt_fcp_req);
+
+	spin_lock(&tfcp_req->reqlock);
+	tfcp_req->fcpreq = NULL;
+	spin_unlock(&tfcp_req->reqlock);
+
+	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
+	/* call_host_done releases reference for abort downcall */
 }
 
 /*
@@ -364,20 +484,15 @@ static void
 fcloop_tgt_fcprqst_done_work(struct work_struct *work)
 {
 	struct fcloop_fcpreq *tfcp_req =
-		container_of(work, struct fcloop_fcpreq, work);
-	struct fcloop_tport *tport = tfcp_req->tport;
+		container_of(work, struct fcloop_fcpreq, tio_done_work);
 	struct nvmefc_fcp_req *fcpreq;
 
 	spin_lock(&tfcp_req->reqlock);
 	fcpreq = tfcp_req->fcpreq;
+	tfcp_req->inistate = INI_IO_COMPLETED;
 	spin_unlock(&tfcp_req->reqlock);
 
-	if (tport->remoteport && fcpreq) {
-		fcpreq->status = tfcp_req->status;
-		fcpreq->done(fcpreq);
-	}
-
-	kfree(tfcp_req);
+	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
 }
 
 
@@ -390,7 +505,6 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
 	struct fcloop_rport *rport = remoteport->private;
 	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
 	struct fcloop_fcpreq *tfcp_req;
-	int ret = 0;
 
 	if (!rport->targetport)
 		return -ECONNREFUSED;
@@ -401,16 +515,20 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
 
 	inireq->fcpreq = fcpreq;
 	inireq->tfcp_req = tfcp_req;
-	INIT_WORK(&inireq->iniwork, fcloop_tgt_fcprqst_ini_done_work);
+	spin_lock_init(&inireq->inilock);
+
 	tfcp_req->fcpreq = fcpreq;
 	tfcp_req->tport = rport->targetport->private;
+	tfcp_req->inistate = INI_IO_START;
 	spin_lock_init(&tfcp_req->reqlock);
-	INIT_WORK(&tfcp_req->work, fcloop_tgt_fcprqst_done_work);
+	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
+	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
+	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
+	kref_init(&tfcp_req->ref);
 
-	ret = nvmet_fc_rcv_fcp_req(rport->targetport, &tfcp_req->tgt_fcp_req,
-				fcpreq->cmdaddr, fcpreq->cmdlen);
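+	/* defer the command downcall to the target to work-item context */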
+	schedule_work(&tfcp_req->fcp_rcv_work);
 
-	return ret;
+	return 0;
 }
 
 static void
@@ -589,7 +707,7 @@ fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
 {
 	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
 
-	schedule_work(&tfcp_req->work);
+	schedule_work(&tfcp_req->tio_done_work);
 }
 
 static void
@@ -605,27 +723,47 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
 			void *hw_queue_handle,
 			struct nvmefc_fcp_req *fcpreq)
 {
-	struct fcloop_rport *rport = remoteport->private;
 	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
-	struct fcloop_fcpreq *tfcp_req = inireq->tfcp_req;
+	struct fcloop_fcpreq *tfcp_req;
+	bool abortio = true;
+
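+	/* snapshot the io's tfcp_req and take a reference under inilock */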
+	spin_lock(&inireq->inilock);
+	tfcp_req = inireq->tfcp_req;
+	if (tfcp_req)
+		fcloop_tfcp_req_get(tfcp_req);
+	spin_unlock(&inireq->inilock);
 
 	if (!tfcp_req)
 		/* abort has already been called */
 		return;
 
-	if (rport->targetport)
-		nvmet_fc_rcv_fcp_abort(rport->targetport,
-					&tfcp_req->tgt_fcp_req);
-
 	/* break initiator/target relationship for io */
 	spin_lock(&tfcp_req->reqlock);
-	inireq->tfcp_req = NULL;
-	tfcp_req->fcpreq = NULL;
+	switch (tfcp_req->inistate) {
+	case INI_IO_START:
+	case INI_IO_ACTIVE:
+		tfcp_req->inistate = INI_IO_ABORTED;
+		break;
+	case INI_IO_COMPLETED:
+		abortio = false;
+		break;
+	default:
+		spin_unlock(&tfcp_req->reqlock);
+		WARN_ON(1);
+		return;
+	}
 	spin_unlock(&tfcp_req->reqlock);
 
-	/* post the aborted io completion */
-	fcpreq->status = -ECANCELED;
-	schedule_work(&inireq->iniwork);
+	if (abortio)
+		/* leave the reference while the work item is scheduled */
+		WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
+	else {
+		/*
+		 * as the io has already had the done callback made,
+		 * nothing more to do. So release the reference taken above
+		 */
+		fcloop_tfcp_req_put(tfcp_req);
+	}
 }
 
 static void
@@ -657,7 +795,8 @@ fcloop_nport_get(struct fcloop_nport *nport)
 static void
 fcloop_localport_delete(struct nvme_fc_local_port *localport)
 {
-	struct fcloop_lport *lport = localport->private;
+	struct fcloop_lport_priv *lport_priv = localport->private;
+	struct fcloop_lport *lport = lport_priv->lport;
 
 	/* release any threads waiting for the unreg to complete */
 	complete(&lport->unreg_done);
@@ -697,7 +836,7 @@ static struct nvme_fc_port_template fctemplate = {
 	.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
 	.dma_boundary = FCLOOP_DMABOUND_4G,
 	/* sizes of additional private data for data structures */
-	.local_priv_sz = sizeof(struct fcloop_lport),
+	.local_priv_sz = sizeof(struct fcloop_lport_priv),
 	.remote_priv_sz = sizeof(struct fcloop_rport),
 	.lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
 	.fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq),
@@ -714,8 +853,7 @@ static struct nvmet_fc_target_template tgttemplate = {
 	.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
 	.dma_boundary = FCLOOP_DMABOUND_4G,
 	/* optional features */
-	.target_features = NVMET_FCTGTFEAT_CMD_IN_ISR |
-			NVMET_FCTGTFEAT_OPDONE_IN_ISR,
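+	/* cmd/done downcalls now come from work-item, not ISR, context */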
+	.target_features = 0,
 	/* sizes of additional private data for data structures */
 	.target_priv_sz = sizeof(struct fcloop_tport),
 };
@@ -728,11 +866,17 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
 	struct fcloop_ctrl_options *opts;
 	struct nvme_fc_local_port *localport;
 	struct fcloop_lport *lport;
-	int ret;
+	struct fcloop_lport_priv *lport_priv;
+	unsigned long flags;
+	int ret = -ENOMEM;
+
+	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
+	if (!lport)
+		return -ENOMEM;
 
 	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
 	if (!opts)
-		return -ENOMEM;
+		goto out_free_lport;
 
 	ret = fcloop_parse_options(opts, buf);
 	if (ret)
@@ -752,23 +896,25 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
 
 	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
 	if (!ret) {
-		unsigned long flags;
-
 		/* success */
-		lport = localport->private;
+		lport_priv = localport->private;
+		lport_priv->lport = lport;
+
 		lport->localport = localport;
 		INIT_LIST_HEAD(&lport->lport_list);
 
 		spin_lock_irqsave(&fcloop_lock, flags);
 		list_add_tail(&lport->lport_list, &fcloop_lports);
 		spin_unlock_irqrestore(&fcloop_lock, flags);
-
-		/* mark all of the input buffer consumed */
-		ret = count;
 	}
 
 out_free_opts:
 	kfree(opts);
+out_free_lport:
+	/* free only if we're going to fail */
+	if (ret)
+		kfree(lport);
+
 	return ret ? ret : count;
 }
 
@@ -790,6 +936,8 @@ __wait_localport_unreg(struct fcloop_lport *lport)
 
 	wait_for_completion(&lport->unreg_done);
 
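+	/* lport was allocated by fcloop_create_local_port; free it now */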
+	kfree(lport);
+
 	return ret;
 }
 