@@ -990,18 +990,25 @@ void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
 }
 EXPORT_SYMBOL(blk_mq_delay_queue);
 
-static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
-				    struct request *rq, bool at_head)
+static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
+					    struct blk_mq_ctx *ctx,
+					    struct request *rq,
+					    bool at_head)
 {
-	struct blk_mq_ctx *ctx = rq->mq_ctx;
-
 	trace_block_rq_insert(hctx->queue, rq);
 
 	if (at_head)
 		list_add(&rq->queuelist, &ctx->rq_list);
 	else
 		list_add_tail(&rq->queuelist, &ctx->rq_list);
+}
+
+static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
+				    struct request *rq, bool at_head)
+{
+	struct blk_mq_ctx *ctx = rq->mq_ctx;
 
+	__blk_mq_insert_req_list(hctx, ctx, rq, at_head);
 	blk_mq_hctx_mark_pending(hctx, ctx);
 }
 
@@ -1057,8 +1064,9 @@ static void blk_mq_insert_requests(struct request_queue *q,
 		rq = list_first_entry(list, struct request, queuelist);
 		list_del_init(&rq->queuelist);
 		rq->mq_ctx = ctx;
-		__blk_mq_insert_request(hctx, rq, false);
+		__blk_mq_insert_req_list(hctx, ctx, rq, false);
 	}
+	blk_mq_hctx_mark_pending(hctx, ctx);
 	spin_unlock(&ctx->lock);
 
 	blk_mq_run_hw_queue(hctx, from_schedule);