@@ -1048,7 +1048,8 @@ static bool blk_mq_dispatch_wait_add(struct blk_mq_hw_ctx *hctx)
         return true;
 }
 
-bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
+bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
+                bool got_budget)
 {
         struct blk_mq_hw_ctx *hctx;
         struct request *rq;
@@ -1057,6 +1058,8 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
         if (list_empty(list))
                 return false;
 
+        WARN_ON(!list_is_singular(list) && got_budget);
+
         /*
          * Now process all the entries, sending them to the driver.
          */
@@ -1074,16 +1077,30 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
                          * The initial allocation attempt failed, so we need to
                          * rerun the hardware queue when a tag is freed.
                          */
-                        if (!blk_mq_dispatch_wait_add(hctx))
+                        if (!blk_mq_dispatch_wait_add(hctx)) {
+                                if (got_budget)
+                                        blk_mq_put_dispatch_budget(hctx);
                                 break;
+                        }
 
                         /*
                          * It's possible that a tag was freed in the window
                          * between the allocation failure and adding the
                          * hardware queue to the wait queue.
                          */
-                        if (!blk_mq_get_driver_tag(rq, &hctx, false))
+                        if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
+                                if (got_budget)
+                                        blk_mq_put_dispatch_budget(hctx);
+                                break;
+                        }
+                }
+
+                if (!got_budget) {
+                        ret = blk_mq_get_dispatch_budget(hctx);
+                        if (ret == BLK_STS_RESOURCE)
                                 break;
+                        if (ret != BLK_STS_OK)
+                                goto fail_rq;
                 }
 
                 list_del_init(&rq->queuelist);
@@ -1111,6 +1128,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
                         break;
                 }
 
+ fail_rq:
                 if (unlikely(ret != BLK_STS_OK)) {
                         errors++;
                         blk_mq_end_request(rq, BLK_STS_IOERR);
@@ -1169,6 +1187,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
         int srcu_idx;
+        bool run_queue;
 
         /*
          * We should be running this queue from one of the CPUs that
@@ -1185,15 +1204,18 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 
         if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
                 rcu_read_lock();
-                blk_mq_sched_dispatch_requests(hctx);
+                run_queue = blk_mq_sched_dispatch_requests(hctx);
                 rcu_read_unlock();
         } else {
                 might_sleep();
 
                 srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
-                blk_mq_sched_dispatch_requests(hctx);
+                run_queue = blk_mq_sched_dispatch_requests(hctx);
                 srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
         }
+
+        if (run_queue)
+                blk_mq_run_hw_queue(hctx, true);
 }
 
 /*
@@ -1582,6 +1604,13 @@ static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
         if (!blk_mq_get_driver_tag(rq, NULL, false))
                 goto insert;
 
+        ret = blk_mq_get_dispatch_budget(hctx);
+        if (ret == BLK_STS_RESOURCE) {
+                blk_mq_put_driver_tag(rq);
+                goto insert;
+        } else if (ret != BLK_STS_OK)
+                goto fail_rq;
+
         new_cookie = request_to_qc_t(hctx, rq);
 
         /*
@@ -1598,6 +1627,7 @@ static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
                 __blk_mq_requeue_request(rq);
                 goto insert;
         default:
+ fail_rq:
                 *cookie = BLK_QC_T_NONE;
                 blk_mq_end_request(rq, ret);
                 return;
@@ -2582,6 +2612,9 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
         if (!set->ops->queue_rq)
                 return -EINVAL;
 
+        if (!set->ops->get_budget ^ !set->ops->put_budget)
+                return -EINVAL;
+
         if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
                 pr_info("blk-mq: reduced tag depth to %u\n",
                         BLK_MQ_MAX_DEPTH);
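
Taken together, the hunks above define the budget contract: blk-mq reserves a budget
via ->get_budget() before handing a request to ->queue_rq(), releases it via
->put_budget() whenever dispatch bails out before the driver sees the request
(failed wait-queue add, failed driver-tag retry), and otherwise leaves the release
to the driver's completion path. Below is a minimal sketch of the driver side under
that contract. It is not part of the patch: the mydrv_* names, the MYDRV_QUEUE_DEPTH
limit, and the atomic-counter scheme are illustrative assumptions, and it assumes
this version of the API where .get_budget returns a blk_status_t. The motivating
real user is SCSI, where the budget maps onto per-device busy accounting.

/* Hypothetical driver-side budget hooks; mydrv_* names are illustrative. */
#include <linux/atomic.h>
#include <linux/blk-mq.h>

#define MYDRV_QUEUE_DEPTH 64            /* assumed per-queue in-flight cap */

struct mydrv_queue {
        atomic_t busy;                  /* requests currently holding a budget */
};

/* Reserve one budget unit before dispatch; returning BLK_STS_RESOURCE
 * tells blk-mq to stop dispatching and retry later. */
static blk_status_t mydrv_get_budget(struct blk_mq_hw_ctx *hctx)
{
        struct mydrv_queue *mq = hctx->queue->queuedata;

        if (atomic_inc_return(&mq->busy) > MYDRV_QUEUE_DEPTH) {
                atomic_dec(&mq->busy);
                return BLK_STS_RESOURCE;
        }
        return BLK_STS_OK;
}

/* Undo an unused reservation; blk-mq calls this on the bail-out paths
 * where ->queue_rq() never ran. */
static void mydrv_put_budget(struct blk_mq_hw_ctx *hctx)
{
        struct mydrv_queue *mq = hctx->queue->queuedata;

        atomic_dec(&mq->busy);
}

static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
                                   const struct blk_mq_queue_data *bd)
{
        blk_mq_start_request(bd->rq);
        /*
         * Issue bd->rq to hardware here. From this point the budget is
         * owned by the driver: the completion path must drop it (e.g.
         * atomic_dec(&mq->busy)) before calling blk_mq_end_request().
         */
        return BLK_STS_OK;
}

static const struct blk_mq_ops mydrv_mq_ops = {
        .queue_rq       = mydrv_queue_rq,
        .get_budget     = mydrv_get_budget,
        .put_budget     = mydrv_put_budget,
};

Note also the pairing check added to blk_mq_alloc_tag_set():
!set->ops->get_budget ^ !set->ops->put_budget is true exactly when one hook is
supplied without the other, so a half-wired driver is rejected with -EINVAL at
tag-set allocation time instead of leaking or double-freeing budgets at runtime.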