@@ -1180,7 +1180,12 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 		struct blk_mq_queue_data bd;
 
 		rq = list_first_entry(list, struct request, queuelist);
-		if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
+
+		hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
+		if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
+			break;
+
+		if (!blk_mq_get_driver_tag(rq, NULL, false)) {
 			/*
 			 * The initial allocation attempt failed, so we need to
 			 * rerun the hardware queue when a tag is freed. The
@@ -1189,8 +1194,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 			 * we'll re-run it below.
 			 */
 			if (!blk_mq_mark_tag_wait(&hctx, rq)) {
-				if (got_budget)
-					blk_mq_put_dispatch_budget(hctx);
+				blk_mq_put_dispatch_budget(hctx);
 				/*
 				 * For non-shared tags, the RESTART check
 				 * will suffice.
@@ -1201,11 +1205,6 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 			}
 		}
 
-		if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) {
-			blk_mq_put_driver_tag(rq);
-			break;
-		}
-
 		list_del_init(&rq->queuelist);
 
 		bd.rq = rq;
@@ -1804,11 +1803,11 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 	if (q->elevator && !bypass_insert)
 		goto insert;
 
-	if (!blk_mq_get_driver_tag(rq, NULL, false))
+	if (!blk_mq_get_dispatch_budget(hctx))
 		goto insert;
 
-	if (!blk_mq_get_dispatch_budget(hctx)) {
-		blk_mq_put_driver_tag(rq);
+	if (!blk_mq_get_driver_tag(rq, NULL, false)) {
+		blk_mq_put_dispatch_budget(hctx);
 		goto insert;
 	}
 
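
As a rough illustration of the ordering this patch establishes (not part of the patch itself), the sketch below is a minimal, self-contained user-space model: the dispatch budget is acquired before a driver tag is requested, and the budget is released again when no tag can be obtained. The helpers get_dispatch_budget(), get_driver_tag() and put_dispatch_budget() are hypothetical stand-ins for the kernel's blk_mq_get_dispatch_budget(), blk_mq_get_driver_tag() and blk_mq_put_dispatch_budget(); the counters only mimic resource availability.

/*
 * Minimal user-space sketch of the budget-before-tag ordering.
 * Not kernel code; the helpers are stand-ins, not real blk-mq APIs.
 */
#include <stdbool.h>
#include <stdio.h>

static int budget = 1;	/* pretend the device accepts one in-flight request */
static int tags   = 0;	/* pretend no driver tag is currently available */

static bool get_dispatch_budget(void) { return budget-- > 0; }
static void put_dispatch_budget(void) { budget++; }
static bool get_driver_tag(void)      { return tags-- > 0; }

/* Returns true if the request could be dispatched. */
static bool dispatch_one(bool got_budget)
{
	/* 1. Reserve the budget first (unless the caller already holds it). */
	if (!got_budget && !get_dispatch_budget())
		return false;

	/* 2. Only then try to get a driver tag ... */
	if (!get_driver_tag()) {
		/* ... and hand the budget back if no tag is available. */
		put_dispatch_budget();
		return false;
	}

	return true;
}

int main(void)
{
	printf("dispatched: %s\n", dispatch_one(false) ? "yes" : "no");
	printf("budget left: %d\n", budget);	/* restored on tag failure */
	return 0;
}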