@@ -345,21 +345,23 @@ void blk_mq_sched_request_inserted(struct request *rq)
 EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);
 
 static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
+				       bool has_sched,
 				       struct request *rq)
 {
-	if (rq->tag == -1) {
+	/* dispatch flush rq directly */
+	if (rq->rq_flags & RQF_FLUSH_SEQ) {
+		spin_lock(&hctx->lock);
+		list_add(&rq->queuelist, &hctx->dispatch);
+		spin_unlock(&hctx->lock);
+		return true;
+	}
+
+	if (has_sched) {
 		rq->rq_flags |= RQF_SORTED;
-		return false;
+		WARN_ON(rq->tag != -1);
 	}
 
-	/*
-	 * If we already have a real request tag, send directly to
-	 * the dispatch list.
-	 */
-	spin_lock(&hctx->lock);
-	list_add(&rq->queuelist, &hctx->dispatch);
-	spin_unlock(&hctx->lock);
-	return true;
+	return false;
 }
 
 /*
@@ -385,12 +387,13 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
 	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 
-	if (rq->tag == -1 && op_is_flush(rq->cmd_flags)) {
+	/* a flush rq in the flush machinery needs to be dispatched directly */
+	if (!(rq->rq_flags & RQF_FLUSH_SEQ) && op_is_flush(rq->cmd_flags)) {
 		blk_mq_sched_insert_flush(hctx, rq, can_block);
 		return;
 	}
 
-	if (e && blk_mq_sched_bypass_insert(hctx, rq))
+	if (blk_mq_sched_bypass_insert(hctx, !!e, rq))
 		goto run;
 
 	if (e && e->type->ops.mq.insert_requests) {
@@ -428,7 +431,7 @@ void blk_mq_sched_insert_requests(struct request_queue *q,
 	list_for_each_entry_safe(rq, next, list, queuelist) {
 		if (WARN_ON_ONCE(rq->tag != -1)) {
 			list_del_init(&rq->queuelist);
-			blk_mq_sched_bypass_insert(hctx, rq);
+			blk_mq_sched_bypass_insert(hctx, true, rq);
 		}
 	}
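
For reviewers, a minimal userspace sketch of the decision order this patch gives
blk_mq_sched_bypass_insert(): a request carrying RQF_FLUSH_SEQ always bypasses the
scheduler and goes straight to the hctx dispatch list, while every other request
takes the scheduler path when one is attached. The struct, the flag values, and
bypass_insert() below are mocks for illustration only (the hctx locking and
list_add() to the dispatch list are omitted); they are not the kernel definitions.

/* Minimal userspace model of the patched bypass-insert decision
 * (mock types; the RQF_* values here are illustrative only). */
#include <stdbool.h>
#include <stdio.h>

#define RQF_FLUSH_SEQ	(1u << 0)	/* rq is part of the flush state machine */
#define RQF_SORTED	(1u << 1)	/* rq went through the I/O scheduler */

struct request {
	unsigned int rq_flags;
	int tag;
};

/* Mirrors the new blk_mq_sched_bypass_insert(): a flush-sequence rq is
 * always sent straight to the dispatch list (return true); any other rq
 * is handed to the scheduler path (return false), getting RQF_SORTED
 * when a scheduler is attached. */
static bool bypass_insert(bool has_sched, struct request *rq)
{
	if (rq->rq_flags & RQF_FLUSH_SEQ)
		return true;

	if (has_sched)
		rq->rq_flags |= RQF_SORTED;

	return false;
}

int main(void)
{
	struct request flush_rq = { .rq_flags = RQF_FLUSH_SEQ, .tag = -1 };
	struct request normal_rq = { .rq_flags = 0, .tag = -1 };

	/* flush rq bypasses the scheduler regardless of has_sched: prints 1 */
	printf("flush rq bypasses: %d\n", bypass_insert(true, &flush_rq));
	/* normal rq takes the scheduler path: prints 0 */
	printf("normal rq bypasses: %d\n", bypass_insert(true, &normal_rq));
	return 0;
}

Note how blk_mq_sched_insert_request() now passes !!e as has_sched, so the bypass
decision depends on whether a scheduler is attached rather than on whether a
driver tag happened to be allocated up front (the old rq->tag == -1 test).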