
nvme: kick requeue list when requeueing a request instead of when starting the queues

When we requeue a request, we can always insert it back into
the scheduler right away instead of doing so when the queues
are restarted and the requeue work is kicked, so get rid of
the requeue kick in nvme (core and drivers).

Also, there is now no need to start hw queues in nvme_kill_queues:
we don't stop the hw queues anymore, so there is no need to
start them.

Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
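
To illustrate the reasoning, here is a minimal, hypothetical reset/recovery sequence. nvme_reset_example() is invented for this sketch; nvme_stop_queues() and nvme_start_queues() are the existing nvme core helpers, and nvme_complete_rq() is shown in the diff below.

/*
 * Hypothetical sketch (not part of this patch): why kicking the requeue
 * list at requeue time makes the kick at queue-start time unnecessary.
 */
static void nvme_reset_example(struct nvme_ctrl *ctrl)
{
	/* 1. Quiesce the queues so nothing new is dispatched. */
	nvme_stop_queues(ctrl);

	/*
	 * 2. In-flight requests fail and reach nvme_complete_rq(); the
	 *    retryable ones are requeued with blk_mq_requeue_request(req,
	 *    true), which kicks the requeue work and re-inserts them into
	 *    the scheduler right away.
	 */

	/*
	 * 3. Once the controller is usable again, just unquiesce; no
	 *    separate blk_mq_kick_requeue_list() call is needed anymore.
	 */
	nvme_start_queues(ctrl);
}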
Sagi Grimberg, 8 years ago
commit 8d7b8fafad
1 file changed, 2 insertions(+), 17 deletions(-)

drivers/nvme/host/core.c (+2, -17)

@@ -131,7 +131,7 @@ void nvme_complete_rq(struct request *req)
 {
 	if (unlikely(nvme_req(req)->status && nvme_req_needs_retry(req))) {
 		nvme_req(req)->retries++;
-		blk_mq_requeue_request(req, !blk_mq_queue_stopped(req->q));
+		blk_mq_requeue_request(req, true);
 		return;
 	}
 
@@ -2694,9 +2694,6 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
 	/* Forcibly unquiesce queues to avoid blocking dispatch */
 	blk_mq_unquiesce_queue(ctrl->admin_q);
 
-	/* Forcibly start all queues to avoid having stuck requests */
-	blk_mq_start_hw_queues(ctrl->admin_q);
-
 	list_for_each_entry(ns, &ctrl->namespaces, list) {
 		/*
 		 * Revalidating a dead namespace sets capacity to 0. This will
@@ -2709,16 +2706,6 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
 
 		/* Forcibly unquiesce queues to avoid blocking dispatch */
 		blk_mq_unquiesce_queue(ns->queue);
-
-		/*
-		 * Forcibly start all queues to avoid having stuck requests.
-		 * Note that we must ensure the queues are not stopped
-		 * when the final removal happens.
-		 */
-		blk_mq_start_hw_queues(ns->queue);
-
-		/* draining requests in requeue list */
-		blk_mq_kick_requeue_list(ns->queue);
 	}
 	mutex_unlock(&ctrl->namespaces_mutex);
 }
@@ -2787,10 +2774,8 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
 	struct nvme_ns *ns;
 
 	mutex_lock(&ctrl->namespaces_mutex);
-	list_for_each_entry(ns, &ctrl->namespaces, list) {
+	list_for_each_entry(ns, &ctrl->namespaces, list)
 		blk_mq_unquiesce_queue(ns->queue);
-		blk_mq_kick_requeue_list(ns->queue);
-	}
 	mutex_unlock(&ctrl->namespaces_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_start_queues);
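
For reference, passing true as the second argument of blk_mq_requeue_request() is roughly equivalent to requeueing without a kick and then kicking the requeue list by hand, which is what the removed code in nvme_start_queues()/nvme_kill_queues() used to do. A minimal sketch of that equivalence (illustrative only, not the actual block-layer implementation):

#include <linux/blk-mq.h>

/* Illustrative helper: requeue_and_kick() is not a real kernel function. */
static void requeue_and_kick(struct request *req)
{
	blk_mq_requeue_request(req, false);	/* park req on the requeue list */
	blk_mq_kick_requeue_list(req->q);	/* schedule the requeue work now */
}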