@@ -1512,13 +1512,19 @@ nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
 static int
 __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
 {
-	int state;
+	unsigned long flags;
+	int opstate;
+
+	spin_lock_irqsave(&ctrl->lock, flags);
+	opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
+	if (opstate != FCPOP_STATE_ACTIVE)
+		atomic_set(&op->state, opstate);
+	else if (ctrl->flags & FCCTRL_TERMIO)
+		ctrl->iocnt++;
+	spin_unlock_irqrestore(&ctrl->lock, flags);
 
-	state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
-	if (state != FCPOP_STATE_ACTIVE) {
-		atomic_set(&op->state, state);
+	if (opstate != FCPOP_STATE_ACTIVE)
 		return -ECANCELED;
-	}
 
 	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
 					&ctrl->rport->remoteport,
@@ -1532,52 +1538,23 @@ static void
 nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
 {
 	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
-	unsigned long flags;
-	int i, ret;
-
-	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
-		if (atomic_read(&aen_op->state) != FCPOP_STATE_ACTIVE)
-			continue;
-
-		spin_lock_irqsave(&ctrl->lock, flags);
-		if (ctrl->flags & FCCTRL_TERMIO) {
-			ctrl->iocnt++;
-			aen_op->flags |= FCOP_FLAGS_TERMIO;
-		}
-		spin_unlock_irqrestore(&ctrl->lock, flags);
-
-		ret = __nvme_fc_abort_op(ctrl, aen_op);
-		if (ret) {
-			/*
-			 * if __nvme_fc_abort_op failed the io wasn't
-			 * active. Thus this call path is running in
-			 * parallel to the io complete. Treat as non-error.
-			 */
+	int i;
 
-			/* back out the flags/counters */
-			spin_lock_irqsave(&ctrl->lock, flags);
-			if (ctrl->flags & FCCTRL_TERMIO)
-				ctrl->iocnt--;
-			aen_op->flags &= ~FCOP_FLAGS_TERMIO;
-			spin_unlock_irqrestore(&ctrl->lock, flags);
-			return;
-		}
-	}
+	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
+		__nvme_fc_abort_op(ctrl, aen_op);
 }
 
 static inline int
 __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
-		struct nvme_fc_fcp_op *op)
+		struct nvme_fc_fcp_op *op, int opstate)
 {
 	unsigned long flags;
 	bool complete_rq = false;
 
 	spin_lock_irqsave(&ctrl->lock, flags);
-	if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
-		if (ctrl->flags & FCCTRL_TERMIO) {
-			if (!--ctrl->iocnt)
-				wake_up(&ctrl->ioabort_wait);
-		}
+	if (opstate == FCPOP_STATE_ABORTED && ctrl->flags & FCCTRL_TERMIO) {
+		if (!--ctrl->iocnt)
+			wake_up(&ctrl->ioabort_wait);
 	}
 	if (op->flags & FCOP_FLAGS_RELEASED)
 		complete_rq = true;
@@ -1601,6 +1578,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 	__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
 	union nvme_result result;
 	bool terminate_assoc = true;
+	int opstate;
 
 	/*
 	 * WARNING:
@@ -1639,11 +1617,12 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 	 * association to be terminated.
 	 */
 
+	opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
+
 	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
 				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
 
-	if (atomic_read(&op->state) == FCPOP_STATE_ABORTED ||
-			op->flags & FCOP_FLAGS_TERMIO)
+	if (opstate == FCPOP_STATE_ABORTED)
 		status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
 	else if (freq->status)
 		status = cpu_to_le16(NVME_SC_INTERNAL << 1);
@@ -1708,7 +1687,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 done:
 	if (op->flags & FCOP_FLAGS_AEN) {
 		nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
-		__nvme_fc_fcpop_chk_teardowns(ctrl, op);
+		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
 		atomic_set(&op->state, FCPOP_STATE_IDLE);
 		op->flags = FCOP_FLAGS_AEN;	/* clear other flags */
 		nvme_fc_ctrl_put(ctrl);
@@ -1725,7 +1704,7 @@ done:
 			ctrl->ctrl.state == NVME_CTRL_CONNECTING))
 		status |= cpu_to_le16(NVME_SC_DNR << 1);
 
-	if (__nvme_fc_fcpop_chk_teardowns(ctrl, op))
+	if (__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate))
 		__nvme_fc_final_op_cleanup(rq);
 	else
 		nvme_end_request(rq, status, result);
@@ -2421,8 +2400,7 @@ __nvme_fc_final_op_cleanup(struct request *rq)
 	struct nvme_fc_ctrl *ctrl = op->ctrl;
 
 	atomic_set(&op->state, FCPOP_STATE_IDLE);
-	op->flags &= ~(FCOP_FLAGS_TERMIO | FCOP_FLAGS_RELEASED |
-			FCOP_FLAGS_COMPLETE);
+	op->flags &= ~(FCOP_FLAGS_RELEASED | FCOP_FLAGS_COMPLETE);
 
 	nvme_fc_unmap_data(ctrl, rq, op);
 	nvme_complete_rq(rq);
@@ -2476,35 +2454,11 @@ nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
 	struct nvme_ctrl *nctrl = data;
 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
-	unsigned long flags;
-	int status;
 
 	if (!blk_mq_request_started(req))
 		return;
 
-	spin_lock_irqsave(&ctrl->lock, flags);
-	if (ctrl->flags & FCCTRL_TERMIO) {
-		ctrl->iocnt++;
-		op->flags |= FCOP_FLAGS_TERMIO;
-	}
-	spin_unlock_irqrestore(&ctrl->lock, flags);
-
-	status = __nvme_fc_abort_op(ctrl, op);
-	if (status) {
-		/*
-		 * if __nvme_fc_abort_op failed the io wasn't
-		 * active. Thus this call path is running in
-		 * parallel to the io complete. Treat as non-error.
-		 */
-
-		/* back out the flags/counters */
-		spin_lock_irqsave(&ctrl->lock, flags);
-		if (ctrl->flags & FCCTRL_TERMIO)
-			ctrl->iocnt--;
-		op->flags &= ~FCOP_FLAGS_TERMIO;
-		spin_unlock_irqrestore(&ctrl->lock, flags);
-		return;
-	}
+	__nvme_fc_abort_op(ctrl, op);
 }
 
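As a review aid, the pattern the patch converges on — both the abort path and the completion path doing an atomic exchange on op->state, so that exactly one caller observes the state it expected and wins the transition — can be sketched in isolation roughly as below. This is a stand-alone user-space C sketch, not driver code: fcp_op, try_abort and complete_op are illustrative names rather than kernel symbols, and the ctrl->lock/iocnt accounting the driver performs around the exchange is omitted.

	/* Build with: cc -std=c11 -o claim claim.c */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	enum { OP_IDLE, OP_ACTIVE, OP_ABORTED, OP_COMPLETE };

	struct fcp_op {
		atomic_int state;
	};

	/* Abort path: claim ABORTED only if the op was still ACTIVE. */
	static bool try_abort(struct fcp_op *op)
	{
		int prev = atomic_exchange(&op->state, OP_ABORTED);

		if (prev != OP_ACTIVE) {
			/* Lost the race to completion: put back what was there. */
			atomic_store(&op->state, prev);
			return false;
		}
		return true;
	}

	/* Completion path: record COMPLETE and learn whether an abort won first. */
	static bool complete_op(struct fcp_op *op)
	{
		int prev = atomic_exchange(&op->state, OP_COMPLETE);

		return prev == OP_ABORTED;	/* report as aborted, not done */
	}

	int main(void)
	{
		struct fcp_op op = { .state = OP_ACTIVE };

		printf("abort claimed: %d\n", try_abort(&op));      /* 1: abort wins */
		printf("completion saw abort: %d\n", complete_op(&op)); /* 1 */
		return 0;
	}

Because the previous state is returned by the exchange itself, neither side needs a separate FCOP_FLAGS_TERMIO-style marker to decide who won, which is why the patch can drop that flag entirely.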