@@ -100,6 +100,22 @@ static struct class *nvme_subsys_class;
 static void nvme_ns_remove(struct nvme_ns *ns);
 static int nvme_revalidate_disk(struct gendisk *disk);
 static void nvme_put_subsystem(struct nvme_subsystem *subsys);
+static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
+                                           unsigned nsid);
+
+static void nvme_set_queue_dying(struct nvme_ns *ns)
+{
+        /*
+         * Revalidating a dead namespace sets capacity to 0. This will end
+         * buffered writers dirtying pages that can't be synced.
+         */
+        if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+                return;
+        revalidate_disk(ns->disk);
+        blk_set_queue_dying(ns->queue);
+        /* Forcibly unquiesce queues to avoid blocking dispatch */
+        blk_mq_unquiesce_queue(ns->queue);
+}
 
 static void nvme_queue_scan(struct nvme_ctrl *ctrl)
 {
@@ -1151,19 +1167,15 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 
 static void nvme_update_formats(struct nvme_ctrl *ctrl)
 {
-        struct nvme_ns *ns, *next;
-        LIST_HEAD(rm_list);
+        struct nvme_ns *ns;
 
-        down_write(&ctrl->namespaces_rwsem);
-        list_for_each_entry(ns, &ctrl->namespaces, list) {
-                if (ns->disk && nvme_revalidate_disk(ns->disk)) {
-                        list_move_tail(&ns->list, &rm_list);
-                }
-        }
-        up_write(&ctrl->namespaces_rwsem);
+        down_read(&ctrl->namespaces_rwsem);
+        list_for_each_entry(ns, &ctrl->namespaces, list)
+                if (ns->disk && nvme_revalidate_disk(ns->disk))
+                        nvme_set_queue_dying(ns);
+        up_read(&ctrl->namespaces_rwsem);
 
-        list_for_each_entry_safe(ns, next, &rm_list, list)
-                nvme_ns_remove(ns);
+        nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
 }
 
 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
@@ -3138,7 +3150,7 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
 
         down_write(&ctrl->namespaces_rwsem);
         list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
-                if (ns->head->ns_id > nsid)
+                if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
                         list_move_tail(&ns->list, &rm_list);
         }
         up_write(&ctrl->namespaces_rwsem);
@@ -3542,19 +3554,9 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
         if (ctrl->admin_q)
                 blk_mq_unquiesce_queue(ctrl->admin_q);
 
-        list_for_each_entry(ns, &ctrl->namespaces, list) {
-                /*
-                 * Revalidating a dead namespace sets capacity to 0. This will
-                 * end buffered writers dirtying pages that can't be synced.
-                 */
-                if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
-                        continue;
-                revalidate_disk(ns->disk);
-                blk_set_queue_dying(ns->queue);
+        list_for_each_entry(ns, &ctrl->namespaces, list)
+                nvme_set_queue_dying(ns);
 
-                /* Forcibly unquiesce queues to avoid blocking dispatch */
-                blk_mq_unquiesce_queue(ns->queue);
-        }
         up_read(&ctrl->namespaces_rwsem);
 }
 EXPORT_SYMBOL_GPL(nvme_kill_queues);
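
The teardown logic that nvme_update_formats() and nvme_kill_queues() now share is an idempotent helper guarded by an atomic test-and-set on NVME_NS_DEAD: whichever path reaches a namespace first performs the teardown exactly once, and every later caller returns immediately. Below is a minimal userspace sketch of that pattern, assuming only C11 atomics; the struct, function, and message names are illustrative, not kernel code.

    /* Sketch: first-caller-wins teardown via an atomic flag. */
    #include <stdatomic.h>
    #include <stdio.h>

    struct ns {
            atomic_flag dead;       /* plays the role of NVME_NS_DEAD */
    };

    static void ns_set_dying(struct ns *ns)
    {
            /* Returns true if the flag was already set: later callers bail out. */
            if (atomic_flag_test_and_set(&ns->dead))
                    return;
            /* One-shot teardown, analogous to revalidate_disk() +
             * blk_set_queue_dying() + blk_mq_unquiesce_queue() above. */
            printf("namespace torn down once\n");
    }

    int main(void)
    {
            struct ns ns = { .dead = ATOMIC_FLAG_INIT };

            ns_set_dying(&ns);      /* performs the teardown */
            ns_set_dying(&ns);      /* no-op: already marked dead */
            return 0;
    }

This is also why nvme_update_formats() can drop from down_write() to down_read(): it only flags namespaces dead under the lock, and the actual list manipulation is deferred to nvme_remove_invalid_namespaces(), which still takes the rwsem for writing.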