@@ -1094,7 +1094,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 		bool got_budget)
 {
 	struct blk_mq_hw_ctx *hctx;
-	struct request *rq;
+	struct request *rq, *nxt;
 	int errors, queued;
 
 	if (list_empty(list))
@@ -1151,14 +1151,20 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 		if (list_empty(list))
 			bd.last = true;
 		else {
-			struct request *nxt;
-
 			nxt = list_first_entry(list, struct request, queuelist);
 			bd.last = !blk_mq_get_driver_tag(nxt, NULL, false);
 		}
 
 		ret = q->mq_ops->queue_rq(hctx, &bd);
 		if (ret == BLK_STS_RESOURCE) {
+			/*
+			 * If an I/O scheduler has been configured and we got a
+			 * driver tag for the next request already, free it again.
+			 */
+			if (!list_empty(list)) {
+				nxt = list_first_entry(list, struct request, queuelist);
+				blk_mq_put_driver_tag(nxt);
+			}
 			blk_mq_put_driver_tag_hctx(hctx, rq);
 			list_add(&rq->queuelist, list);
 			__blk_mq_requeue_request(rq);
@@ -1181,13 +1187,6 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 	 * that is where we will continue on next queue run.
 	 */
 	if (!list_empty(list)) {
-		/*
-		 * If an I/O scheduler has been configured and we got a driver
-		 * tag for the next request already, free it again.
-		 */
-		rq = list_first_entry(list, struct request, queuelist);
-		blk_mq_put_driver_tag(rq);
-
		spin_lock(&hctx->lock);
 		list_splice_init(list, &hctx->dispatch);
 		spin_unlock(&hctx->lock);
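
For context, a minimal userspace sketch of the ordering this patch enforces: when dispatch of the current request fails with BLK_STS_RESOURCE, the driver tag speculatively taken for the *next* request must be released while that request is still at the head of the list, because requeueing the failed request puts it back at the head, and the old cleanup path's list_first_entry() peek would then find the wrong request. This is a toy model only; every name in it (toy_request, pop, push, put_tag) is hypothetical and none of it is a kernel API.

/*
 * Toy model of the dispatch-failure ordering above. Only the list
 * handling mirrors blk_mq_dispatch_rq_list(); everything else is made up.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_request {
	int id;
	bool has_tag;			/* models a held driver tag */
	struct toy_request *next;
};

/* Pop the head request off the list. */
static struct toy_request *pop(struct toy_request **list)
{
	struct toy_request *rq = *list;
	if (rq)
		*list = rq->next;
	return rq;
}

/* Push a request back onto the head of the list (the requeue case). */
static void push(struct toy_request **list, struct toy_request *rq)
{
	rq->next = *list;
	*list = rq;
}

static void put_tag(struct toy_request *rq)
{
	if (rq->has_tag) {
		rq->has_tag = false;
		printf("put tag of rq %d\n", rq->id);
	}
}

int main(void)
{
	struct toy_request b = { .id = 2, .has_tag = false, .next = NULL };
	struct toy_request a = { .id = 1, .has_tag = true,  .next = &b };
	struct toy_request *list = &a;

	struct toy_request *rq = pop(&list);

	/* Speculatively grab a tag for the next request (sets bd.last). */
	if (list)
		list->has_tag = true;

	bool dispatch_failed = true;	/* models BLK_STS_RESOURCE */
	if (dispatch_failed) {
		/*
		 * Free the next request's tag while it is still the list
		 * head; after push(&list, rq) the head is rq again, so a
		 * later head-of-list peek would no longer reach it.
		 */
		if (list)
			put_tag(list);
		put_tag(rq);		/* models blk_mq_put_driver_tag_hctx() */
		push(&list, rq);	/* requeue to the head */
	}
	return 0;
}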