@@ -1903,34 +1903,6 @@ static void nvme_dev_list_remove(struct nvme_dev *dev)
 		kthread_stop(tmp);
 }
 
-static void nvme_freeze_queues(struct nvme_dev *dev)
-{
-	struct nvme_ns *ns;
-
-	list_for_each_entry(ns, &dev->ctrl.namespaces, list) {
-		blk_mq_freeze_queue_start(ns->queue);
-
-		spin_lock_irq(ns->queue->queue_lock);
-		queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
-		spin_unlock_irq(ns->queue->queue_lock);
-
-		blk_mq_cancel_requeue_work(ns->queue);
-		blk_mq_stop_hw_queues(ns->queue);
-	}
-}
-
-static void nvme_unfreeze_queues(struct nvme_dev *dev)
-{
-	struct nvme_ns *ns;
-
-	list_for_each_entry(ns, &dev->ctrl.namespaces, list) {
-		queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
-		blk_mq_unfreeze_queue(ns->queue);
-		blk_mq_start_stopped_hw_queues(ns->queue, true);
-		blk_mq_kick_requeue_list(ns->queue);
-	}
-}
-
 static void nvme_dev_shutdown(struct nvme_dev *dev)
 {
 	int i;
@@ -1940,7 +1912,7 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
 
 	mutex_lock(&dev->shutdown_lock);
 	if (dev->bar) {
-		nvme_freeze_queues(dev);
+		nvme_freeze_queues(&dev->ctrl);
 		csts = readl(dev->bar + NVME_REG_CSTS);
 	}
 	if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
@@ -2049,7 +2021,7 @@ static void nvme_reset_work(struct work_struct *work)
 		dev_warn(dev->dev, "IO queues not created\n");
 		nvme_remove_namespaces(&dev->ctrl);
 	} else {
-		nvme_unfreeze_queues(dev);
+		nvme_unfreeze_queues(&dev->ctrl);
 		nvme_dev_add(dev);
 	}
 
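For reference, the updated callers now pass &dev->ctrl rather than the PCI-specific struct nvme_dev, which suggests the deleted helpers reappear in the transport-independent core as nvme_ctrl-based functions. A minimal sketch of that counterpart, derived directly from the deleted bodies with dev->ctrl. replaced by ctrl-> (the exact signatures, the EXPORT_SYMBOL_GPL lines, and the absence of locking around ctrl->namespaces are assumptions; the core-side hunk is not shown in this excerpt):

/*
 * Sketch only: the helpers removed from pci.c above, reworked to take
 * the struct nvme_ctrl that the updated callers now pass.  Walks every
 * namespace, marks its queue stopped, and quiesces the hardware queues.
 */
void nvme_freeze_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	list_for_each_entry(ns, &ctrl->namespaces, list) {
		blk_mq_freeze_queue_start(ns->queue);

		spin_lock_irq(ns->queue->queue_lock);
		queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
		spin_unlock_irq(ns->queue->queue_lock);

		blk_mq_cancel_requeue_work(ns->queue);
		blk_mq_stop_hw_queues(ns->queue);
	}
}
EXPORT_SYMBOL_GPL(nvme_freeze_queues);	/* export is an assumption */

/* Reverse of the above: clear the stopped flag and restart dispatch. */
void nvme_unfreeze_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	list_for_each_entry(ns, &ctrl->namespaces, list) {
		queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
		blk_mq_unfreeze_queue(ns->queue);
		blk_mq_start_stopped_hw_queues(ns->queue, true);
		blk_mq_kick_requeue_list(ns->queue);
	}
}
EXPORT_SYMBOL_GPL(nvme_unfreeze_queues);	/* export is an assumption */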