@@ -285,7 +285,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 		rq->tag = -1;
 		rq->internal_tag = tag;
 	} else {
-		if (blk_mq_tag_busy(data->hctx)) {
+		if (data->hctx->flags & BLK_MQ_F_TAG_SHARED) {
 			rq_flags = RQF_MQ_INFLIGHT;
 			atomic_inc(&data->hctx->nr_active);
 		}
@@ -367,6 +367,8 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 		if (!op_is_flush(op) && e->type->ops.mq.limit_depth &&
 		    !(data->flags & BLK_MQ_REQ_RESERVED))
 			e->type->ops.mq.limit_depth(op, data);
+	} else {
+		blk_mq_tag_busy(data->hctx);
 	}
 
 	tag = blk_mq_get_tag(data);
@@ -971,6 +973,7 @@ bool blk_mq_get_driver_tag(struct request *rq)
 		.hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
 		.flags = BLK_MQ_REQ_NOWAIT,
 	};
+	bool shared;
 
 	if (rq->tag != -1)
 		goto done;
@@ -978,9 +981,10 @@ bool blk_mq_get_driver_tag(struct request *rq)
 	if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
 		data.flags |= BLK_MQ_REQ_RESERVED;
 
+	shared = blk_mq_tag_busy(data.hctx);
 	rq->tag = blk_mq_get_tag(&data);
 	if (rq->tag >= 0) {
-		if (blk_mq_tag_busy(data.hctx)) {
+		if (shared) {
 			rq->rq_flags |= RQF_MQ_INFLIGHT;
 			atomic_inc(&data.hctx->nr_active);
 		}
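
For illustration only, a small userspace sketch (not kernel code; every name below is a hypothetical stand-in for the blk-mq structures touched above) of the ordering the patch establishes: a hardware queue is counted as an active user of a shared tag set before it asks for a tag, so any fair-sharing depth check made during the allocation already sees this queue, while per-request inflight accounting still happens only after the allocation succeeds.

	/*
	 * Sketch with hypothetical names: tag_busy() stands in for
	 * blk_mq_tag_busy(), tag_shared for BLK_MQ_F_TAG_SHARED,
	 * tag_active for BLK_MQ_S_TAG_ACTIVE, nr_active for
	 * hctx->nr_active.
	 */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct tagset_sketch {
		atomic_int active_queues;	/* queues competing for tags */
	};

	struct hctx_sketch {
		bool tag_shared;		/* queue draws from a shared tag set */
		bool tag_active;		/* already counted in active_queues */
		atomic_int nr_active;		/* requests in flight on this queue */
		struct tagset_sketch *tags;
	};

	/* Count the queue as an active tag-set user, at most once. */
	static bool tag_busy(struct hctx_sketch *h)
	{
		if (!h->tag_shared)
			return false;
		if (!h->tag_active) {		/* real code uses an atomic test-and-set */
			h->tag_active = true;
			atomic_fetch_add(&h->tags->active_queues, 1);
		}
		return true;
	}

	/* Toy allocator; the real one can throttle based on active_queues. */
	static int alloc_tag(struct hctx_sketch *h)
	{
		(void)h;
		return 0;
	}

	/* The fixed ordering: account for the queue first, then allocate. */
	static int get_driver_tag(struct hctx_sketch *h)
	{
		bool shared = tag_busy(h);	/* before allocation, as in the patch */
		int tag = alloc_tag(h);		/* allocator already sees this queue */

		if (tag >= 0 && shared)
			atomic_fetch_add(&h->nr_active, 1);	/* per-request accounting */
		return tag;
	}

	int main(void)
	{
		struct tagset_sketch set = { .active_queues = 0 };
		struct hctx_sketch hctx = { .tag_shared = true, .tags = &set };
		int tag = get_driver_tag(&hctx);

		printf("tag=%d active_queues=%d nr_active=%d\n", tag,
		       atomic_load(&set.active_queues),
		       atomic_load(&hctx.nr_active));
		return 0;
	}

The sketch elides the locking of the real code path; in particular, blk_mq_tag_busy() guarantees the once-per-queue accounting with atomic bit operations rather than the plain flag used here.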