@@ -90,9 +90,11 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 	return atomic_read(&hctx->nr_active) < depth;
 }
 
-static int __blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt)
+static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
+			    struct sbitmap_queue *bt)
 {
-	if (!hctx_may_queue(hctx, bt))
+	if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
+	    !hctx_may_queue(data->hctx, bt))
 		return -1;
 	return __sbitmap_queue_get(bt);
 }
@@ -118,7 +120,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 		tag_offset = tags->nr_reserved_tags;
 	}
 
-	tag = __blk_mq_get_tag(data->hctx, bt);
+	tag = __blk_mq_get_tag(data, bt);
 	if (tag != -1)
 		goto found_tag;
 
@@ -129,7 +131,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 	do {
 		prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);
 
-		tag = __blk_mq_get_tag(data->hctx, bt);
+		tag = __blk_mq_get_tag(data, bt);
 		if (tag != -1)
 			break;
 
@@ -144,7 +146,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
	 * Retry tag allocation after running the hardware queue,
	 * as running the queue may also have found completions.
	 */
-	tag = __blk_mq_get_tag(data->hctx, bt);
+	tag = __blk_mq_get_tag(data, bt);
 	if (tag != -1)
 		break;
 
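For context, a standalone sketch (plain userspace C, not kernel code) of the gate this hunk series introduces: allocations flagged BLK_MQ_REQ_INTERNAL, i.e. scheduler-internal tags, skip the hctx_may_queue() fairness check, while regular allocations still honor it. The struct, helper names, and flag value below are simplified stand-ins for illustration, not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

#define BLK_MQ_REQ_INTERNAL	(1U << 2)	/* illustrative value */

/* simplified stand-in for struct blk_mq_alloc_data */
struct alloc_data {
	unsigned int flags;
	bool hctx_at_limit;	/* stands in for !hctx_may_queue(hctx, bt) */
};

/* mirrors the gate in the rewritten __blk_mq_get_tag() */
static int try_get_tag(const struct alloc_data *data)
{
	if (!(data->flags & BLK_MQ_REQ_INTERNAL) && data->hctx_at_limit)
		return -1;	/* regular request, hctx over its fair share */
	return 42;		/* pretend __sbitmap_queue_get() found a free bit */
}

int main(void)
{
	struct alloc_data normal = { .flags = 0, .hctx_at_limit = true };
	struct alloc_data sched = { .flags = BLK_MQ_REQ_INTERNAL,
				    .hctx_at_limit = true };

	printf("normal: %d\n", try_get_tag(&normal));	/* -1: throttled */
	printf("sched:  %d\n", try_get_tag(&sched));	/* 42: bypasses the limit */
	return 0;
}

As I read the change, the point of the bypass is that internal tags are allocated on behalf of the I/O scheduler rather than a submitting queue, so counting them against the per-hctx active-queue limit would throttle the wrong party.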