nvmet_fc: prevent new io rqsts in possible isr completions

When a bio completion calls back into the transport for a
back-end io device, the request completion path can free
the transport io job structure, allowing it to be reused for
other operations. The transport has a defer_rcv queue which
holds temporary cmd rcv ops while waiting for io job structures.
When the job frees, if there's a cmd waiting, it is picked up
and submitted for processing, which can call back out to the
bio path if it's a read. Unfortunately, the context of the
original bio done call is unknown, and it may be in a state
(softirq) that is not compatible with submitting the new bio
in the same calling sequence. This is especially true when
using scsi back-end devices, as scsi is in softirq when it
makes the done call.

Correct this by scheduling the io to be started via workqueue
rather than calling the start-new-io path inline from the
original bio done path.

Signed-off-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 9d625f7792
---
 drivers/nvme/target/fc.c | 19 ++++++++++++++++---
 1 file changed, 16 insertions(+), 3 deletions(-)

@@ -87,6 +87,7 @@ struct nvmet_fc_fcp_iod {
 	struct nvmet_req		req;
 	struct work_struct		work;
 	struct work_struct		done_work;
+	struct work_struct		defer_work;
 
 	struct nvmet_fc_tgtport		*tgtport;
 	struct nvmet_fc_tgt_queue	*queue;
@@ -224,6 +225,7 @@ static DEFINE_IDA(nvmet_fc_tgtport_cnt);
 static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
 static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
 static void nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work);
+static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
 static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
 static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
@@ -429,6 +431,7 @@ nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
 	for (i = 0; i < queue->sqsize; fod++, i++) {
 		INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
 		INIT_WORK(&fod->done_work, nvmet_fc_fcp_rqst_op_done_work);
+		INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);
 		fod->tgtport = tgtport;
 		fod->queue = queue;
 		fod->active = false;
@@ -511,6 +514,17 @@ nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
 		nvmet_fc_handle_fcp_rqst(tgtport, fod);
 }
 
+static void
+nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work)
+{
+	struct nvmet_fc_fcp_iod *fod =
+		container_of(work, struct nvmet_fc_fcp_iod, defer_work);
+
+	/* Submit deferred IO for processing */
+	nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
+
+}
+
 static void
 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
 			struct nvmet_fc_fcp_iod *fod)
@@ -568,13 +582,12 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
 	/* inform LLDD IO is now being processed */
 	tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);
 
-	/* Submit deferred IO for processing */
-	nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
-
 	/*
 	 * Leave the queue lookup get reference taken when
 	 * fod was originally allocated.
 	 */
+
+	queue_work(queue->work_q, &fod->defer_work);
 }
 
 static int