
blk-mq-sched: bypass the scheduler for flushes entirely

There's a weird inconsistency: flushes are mostly hidden from the
scheduler, yet it still needs to be aware of them in ->insert_requests().
Instead of having every scheduler call blk_mq_sched_bypass_insert(),
let's do it in the common framework.

Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
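
For context, here is a sketch of the bypass helper as it reads after this
commit. The hunks below elide the middle of the function, so everything
between the two visible fragments (the early return for untagged requests,
and the list_add() onto hctx->dispatch under the lock) is inferred from the
surrounding context rather than quoted from the patch:

	static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
					       struct request *rq)
	{
		/*
		 * No driver tag yet: this is an ordinary request, so mark
		 * it sorted and let the scheduler handle the insertion.
		 */
		if (rq->tag == -1) {
			rq->rq_flags |= RQF_SORTED;
			return false;
		}

		/*
		 * A tag already assigned at insert time should only mean a
		 * request coming out of the flush machinery. Queue it
		 * straight on the hctx dispatch list, bypassing the
		 * scheduler. (The list_add() target is inferred, not shown
		 * in the hunks.)
		 */
		spin_lock(&hctx->lock);
		list_add(&rq->queuelist, &hctx->dispatch);
		spin_unlock(&hctx->lock);
		return true;
	}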
Omar Sandoval 8 years ago
parent
commit
0cacba6cf8
3 changed files with 23 additions and 6 deletions
  1. block/blk-mq-sched.c (+23, -2)
  2. block/blk-mq-sched.h (+0, -1)
  3. block/mq-deadline.c (+0, -3)

block/blk-mq-sched.c (+23, -2)

@@ -289,7 +289,8 @@ void blk_mq_sched_request_inserted(struct request *rq)
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);
 
-bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, struct request *rq)
+static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
+				       struct request *rq)
 {
 	if (rq->tag == -1) {
 		rq->rq_flags |= RQF_SORTED;
@@ -305,7 +306,6 @@ bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, struct request *rq)
 	spin_unlock(&hctx->lock);
 	return true;
 }
-EXPORT_SYMBOL_GPL(blk_mq_sched_bypass_insert);
 
 static void blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
 {
@@ -363,6 +363,9 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 		return;
 	}
 
+	if (e && blk_mq_sched_bypass_insert(hctx, rq))
+		goto run;
+
 	if (e && e->type->ops.mq.insert_requests) {
 		LIST_HEAD(list);
 
@@ -374,6 +377,7 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 		spin_unlock(&ctx->lock);
 	}
 
+run:
 	if (run_queue)
 		blk_mq_run_hw_queue(hctx, async);
 }
@@ -385,6 +389,23 @@ void blk_mq_sched_insert_requests(struct request_queue *q,
 	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 	struct elevator_queue *e = hctx->queue->elevator;
 
+	if (e) {
+		struct request *rq, *next;
+
+		/*
+		 * We bypass requests that already have a driver tag assigned,
+		 * which should only be flushes. Flushes are only ever inserted
+		 * as single requests, so we shouldn't ever hit the
+		 * WARN_ON_ONCE() below (but let's handle it just in case).
+		 */
+		list_for_each_entry_safe(rq, next, list, queuelist) {
+			if (WARN_ON_ONCE(rq->tag != -1)) {
+				list_del_init(&rq->queuelist);
+				blk_mq_sched_bypass_insert(hctx, rq);
+			}
+		}
+	}
+
 	if (e && e->type->ops.mq.insert_requests)
 		e->type->ops.mq.insert_requests(hctx, list, false);
 	else
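
Pieced together, the blk_mq_sched_insert_request() hunks above give roughly
the following single-request flow. The parameter list past at_head is
truncated in the hunk headers, and the locals plus the code between the
hunks are reconstructed from the visible lock/unlock context, so treat all
of that as inferred rather than quoted from the patch:

	void blk_mq_sched_insert_request(struct request *rq, bool at_head,
					 bool run_queue, bool async)
	{
		struct elevator_queue *e = rq->q->elevator;
		struct blk_mq_ctx *ctx = rq->mq_ctx;
		struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);

		/*
		 * (Unchanged early-return path for flush-machinery requests,
		 * elided by the hunks; it ends in the "return; }" context at
		 * the top of the third hunk.)
		 */

		/* New: tagged (flush) requests skip the scheduler... */
		if (e && blk_mq_sched_bypass_insert(hctx, rq))
			goto run;

		if (e && e->type->ops.mq.insert_requests) {
			LIST_HEAD(list);

			list_add(&rq->queuelist, &list);
			e->type->ops.mq.insert_requests(hctx, &list, at_head);
		} else {
			/*
			 * No scheduler hook: insert under the ctx lock (only
			 * the spin_unlock(&ctx->lock) is visible context).
			 */
			spin_lock(&ctx->lock);
			__blk_mq_insert_request(hctx, rq, at_head);
			spin_unlock(&ctx->lock);
		}

	run:
		/* ...but still kick the hardware queue. */
		if (run_queue)
			blk_mq_run_hw_queue(hctx, async);
	}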

block/blk-mq-sched.h (+0, -1)

@@ -15,7 +15,6 @@ struct request *blk_mq_sched_get_request(struct request_queue *q, struct bio *bi
 void blk_mq_sched_put_request(struct request *rq);
 
 void blk_mq_sched_request_inserted(struct request *rq);
-bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, struct request *rq);
 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio);
 bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);

block/mq-deadline.c (+0, -3)

@@ -395,9 +395,6 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 
 	blk_mq_sched_request_inserted(rq);
 
-	if (blk_mq_sched_bypass_insert(hctx, rq))
-		return;
-
 	if (at_head || blk_rq_is_passthrough(rq)) {
 		if (at_head)
 			list_add(&rq->queuelist, &dd->dispatch);
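
The payoff: with the bypass hoisted into the common framework, a
scheduler's ->insert_requests() hook only ever sees untagged requests. A
hypothetical minimal hook could now look like the sketch below; toy_sched_data,
its fifo list, and the function names are illustrative inventions, and only
the hook signature, matching the e->type->ops.mq.insert_requests(hctx, list,
false) call in the blk-mq-sched.c hunk, comes from the patch:

	/* Illustrative per-queue scheduler data, not from the patch. */
	struct toy_sched_data {
		spinlock_t lock;
		struct list_head fifo;
	};

	/*
	 * Hypothetical toy scheduler hook. After this commit the framework
	 * has already rerouted tagged flush requests to the dispatch list,
	 * so no blk_mq_sched_bypass_insert() check is needed here.
	 */
	static void toy_insert_requests(struct blk_mq_hw_ctx *hctx,
					struct list_head *list, bool at_head)
	{
		struct toy_sched_data *td = hctx->queue->elevator->elevator_data;
		struct request *rq;

		spin_lock(&td->lock);
		while (!list_empty(list)) {
			rq = list_first_entry(list, struct request, queuelist);
			list_del_init(&rq->queuelist);
			blk_mq_sched_request_inserted(rq);
			if (at_head)
				list_add(&rq->queuelist, &td->fifo);
			else
				list_add_tail(&rq->queuelist, &td->fifo);
		}
		spin_unlock(&td->lock);
	}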