@@ -1543,15 +1543,10 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
 		reinit_completion(&dev->ioq_wait);
  retry:
 		timeout = ADMIN_TIMEOUT;
-		for (; i > 0; i--) {
-			struct nvme_queue *nvmeq = dev->queues[i];
-
-			if (!pass)
-				nvme_suspend_queue(nvmeq);
-			if (nvme_delete_queue(nvmeq, opcode))
+		for (; i > 0; i--, sent++)
+			if (nvme_delete_queue(dev->queues[i], opcode))
 				break;
-			++sent;
-		}
+
 		while (sent--) {
 			timeout = wait_for_completion_io_timeout(&dev->ioq_wait, timeout);
 			if (timeout == 0)
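
One subtlety in the loop rewrite above: sent must not count the queue whose delete command could not be issued. The old code only reached ++sent after the if/break, and the new form preserves that because the for-loop step expression (i--, sent++) is skipped when the body breaks out. The following standalone sketch (delete_queue_stub, QUEUE_COUNT and FAIL_AT are made-up stand-ins, not driver code) demonstrates that counting behaviour.

/*
 * Standalone sketch, not driver code: shows that moving the increment
 * into the for-loop step keeps the old semantics, because the step
 * "i--, sent++" only runs after a body iteration that did NOT break.
 */
#include <stdio.h>

#define QUEUE_COUNT	4
#define FAIL_AT		2	/* pretend the delete for queue 2 cannot be issued */

static int delete_queue_stub(int qid)
{
	return qid == FAIL_AT;	/* non-zero means "could not issue" */
}

int main(void)
{
	int i = QUEUE_COUNT, sent = 0;

	for (; i > 0; i--, sent++)
		if (delete_queue_stub(i))
			break;

	/* Deletes for queues 4 and 3 were issued, queue 2 failed: sent == 2 */
	printf("sent = %d\n", sent);
	return 0;
}
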
@@ -1693,11 +1688,12 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 		nvme_stop_queues(&dev->ctrl);
 		csts = readl(dev->bar + NVME_REG_CSTS);
 	}
+
+	for (i = dev->queue_count - 1; i > 0; i--)
+		nvme_suspend_queue(dev->queues[i]);
+
 	if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
-		for (i = dev->queue_count - 1; i >= 0; i--) {
-			struct nvme_queue *nvmeq = dev->queues[i];
-			nvme_suspend_queue(nvmeq);
-		}
+		nvme_suspend_queue(dev->queues[0]);
 	} else {
 		nvme_disable_io_queues(dev);
 		nvme_disable_admin_queue(dev, shutdown);
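
The effect of the second hunk is that every I/O queue (indices 1 through queue_count - 1) is now suspended unconditionally before the controller status is inspected; the CSTS check only decides whether the admin queue (index 0) is suspended directly because the controller is dead, or whether the queues are deleted and the controller shut down cleanly. A standalone sketch of that ordering follows; the stubs (suspend_queue, disable_io_queues, disable_admin_queue, QUEUE_COUNT) are made up and only model the control flow, not the driver helpers.

/*
 * Standalone sketch, not driver code: control flow that results from the
 * hunk above. I/O queues are always suspended first; the status check
 * only affects how the admin queue is taken down.
 */
#include <stdio.h>
#include <stdbool.h>

#define QUEUE_COUNT	4

static void suspend_queue(int qid)	{ printf("suspend queue %d\n", qid); }
static void disable_io_queues(void)	{ printf("delete I/O queues\n"); }
static void disable_admin_queue(void)	{ printf("disable admin queue\n"); }

static void dev_disable_sketch(bool controller_dead)
{
	int i;

	/* New in this patch: I/O queues are always suspended up front. */
	for (i = QUEUE_COUNT - 1; i > 0; i--)
		suspend_queue(i);

	if (controller_dead)
		suspend_queue(0);	/* only the admin queue is left */
	else {
		disable_io_queues();
		disable_admin_queue();
	}
}

int main(void)
{
	dev_disable_sketch(false);	/* orderly shutdown path */
	dev_disable_sketch(true);	/* dead-controller path */
	return 0;
}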