@@ -965,16 +965,15 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	return BLK_EH_RESET_TIMER;
 }
 
-static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved)
+static void nvme_cancel_io(struct request *req, void *data, bool reserved)
 {
-	struct nvme_queue *nvmeq = data;
+	struct nvme_dev *dev = data;
 	int status;
 
 	if (!blk_mq_request_started(req))
 		return;
 
-	dev_dbg_ratelimited(nvmeq->dev->ctrl.device,
-		 "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid);
+	dev_dbg_ratelimited(dev->ctrl.device, "Cancelling I/O %d", req->tag);
 
 	status = NVME_SC_ABORT_REQ;
 	if (blk_queue_dying(req->q))
@@ -1031,14 +1030,6 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 	return 0;
 }
 
-static void nvme_clear_queue(struct nvme_queue *nvmeq)
-{
-	spin_lock_irq(&nvmeq->q_lock);
-	if (nvmeq->tags && *nvmeq->tags)
-		blk_mq_all_tag_busy_iter(*nvmeq->tags, nvme_cancel_queue_ios, nvmeq);
-	spin_unlock_irq(&nvmeq->q_lock);
-}
-
 static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
 {
 	struct nvme_queue *nvmeq = dev->queues[0];
@@ -1765,8 +1756,8 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 	}
 	nvme_pci_disable(dev);
 
-	for (i = dev->queue_count - 1; i >= 0; i--)
-		nvme_clear_queue(dev->queues[i]);
+	blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_io, dev);
+	blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_io, dev);
 	mutex_unlock(&dev->shutdown_lock);
 }
 