@@ -33,28 +33,6 @@ static LIST_HEAD(all_q_list);
 
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
 
-static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
-					   unsigned int cpu)
-{
-	return per_cpu_ptr(q->queue_ctx, cpu);
-}
-
-/*
- * This assumes per-cpu software queueing queues. They could be per-node
- * as well, for instance. For now this is hardcoded as-is. Note that we don't
- * care about preemption, since we know the ctx's are persistent. This does
- * mean that we can't rely on ctx always matching the currently running CPU.
- */
-static struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
-{
-	return __blk_mq_get_ctx(q, get_cpu());
-}
-
-static void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
-{
-	put_cpu();
-}
-
 /*
  * Check if any of the ctx's have pending work in this hardware queue
  */
@@ -232,24 +210,23 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 }
 
 static struct request *
-__blk_mq_alloc_request(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
-		struct blk_mq_ctx *ctx, int rw, gfp_t gfp, bool reserved)
+__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
 {
 	struct request *rq;
 	unsigned int tag;
 
-	tag = blk_mq_get_tag(hctx, &ctx->last_tag, gfp, reserved);
+	tag = blk_mq_get_tag(data);
 	if (tag != BLK_MQ_TAG_FAIL) {
-		rq = hctx->tags->rqs[tag];
+		rq = data->hctx->tags->rqs[tag];
 
 		rq->cmd_flags = 0;
-		if (blk_mq_tag_busy(hctx)) {
+		if (blk_mq_tag_busy(data->hctx)) {
 			rq->cmd_flags = REQ_MQ_INFLIGHT;
-			atomic_inc(&hctx->nr_active);
+			atomic_inc(&data->hctx->nr_active);
 		}
 
 		rq->tag = tag;
-		blk_mq_rq_ctx_init(q, ctx, rq, rw);
+		blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
 		return rq;
 	}
 
@@ -262,22 +239,27 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
 	struct blk_mq_ctx *ctx;
 	struct blk_mq_hw_ctx *hctx;
 	struct request *rq;
+	struct blk_mq_alloc_data alloc_data;
 
 	if (blk_mq_queue_enter(q))
 		return NULL;
 
 	ctx = blk_mq_get_ctx(q);
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
+	blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT,
+			reserved, ctx, hctx);
 
-	rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp & ~__GFP_WAIT,
-			reserved);
+	rq = __blk_mq_alloc_request(&alloc_data, rw);
 	if (!rq && (gfp & __GFP_WAIT)) {
 		__blk_mq_run_hw_queue(hctx);
 		blk_mq_put_ctx(ctx);
 
 		ctx = blk_mq_get_ctx(q);
 		hctx = q->mq_ops->map_queue(q, ctx->cpu);
-		rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp, reserved);
+		blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx,
+				hctx);
+		rq = __blk_mq_alloc_request(&alloc_data, rw);
+		ctx = alloc_data.ctx;
 	}
 	blk_mq_put_ctx(ctx);
 	return rq;
@@ -547,15 +529,20 @@ void blk_mq_kick_requeue_list(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
 
-struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx, unsigned int tag)
+static inline bool is_flush_request(struct request *rq, unsigned int tag)
 {
-	struct request_queue *q = hctx->queue;
+	return ((rq->cmd_flags & REQ_FLUSH_SEQ) &&
+			rq->q->flush_rq->tag == tag);
+}
+
+struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
+{
+	struct request *rq = tags->rqs[tag];
 
-	if ((q->flush_rq->cmd_flags & REQ_FLUSH_SEQ) &&
-	    q->flush_rq->tag == tag)
-		return q->flush_rq;
+	if (!is_flush_request(rq, tag))
+		return rq;
 
-	return hctx->tags->rqs[tag];
+	return rq->q->flush_rq;
 }
 EXPORT_SYMBOL(blk_mq_tag_to_rq);
 
@@ -584,7 +571,7 @@ static void blk_mq_timeout_check(void *__data, unsigned long *free_tags)
 		if (tag >= hctx->tags->nr_tags)
 			break;
 
-		rq = blk_mq_tag_to_rq(hctx, tag++);
+		rq = blk_mq_tag_to_rq(hctx->tags, tag++);
 		if (rq->q != hctx->queue)
 			continue;
 		if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
@@ -1158,6 +1145,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	struct blk_mq_ctx *ctx;
 	struct request *rq;
 	int rw = bio_data_dir(bio);
+	struct blk_mq_alloc_data alloc_data;
 
 	if (unlikely(blk_mq_queue_enter(q))) {
 		bio_endio(bio, -EIO);
@@ -1171,7 +1159,9 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 		rw |= REQ_SYNC;
 
 	trace_block_getrq(q, bio, rw);
-	rq = __blk_mq_alloc_request(q, hctx, ctx, rw, GFP_ATOMIC, false);
+	blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx,
+			hctx);
+	rq = __blk_mq_alloc_request(&alloc_data, rw);
 	if (unlikely(!rq)) {
 		__blk_mq_run_hw_queue(hctx);
 		blk_mq_put_ctx(ctx);
@@ -1179,8 +1169,11 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 
 		ctx = blk_mq_get_ctx(q);
 		hctx = q->mq_ops->map_queue(q, ctx->cpu);
-		rq = __blk_mq_alloc_request(q, hctx, ctx, rw,
-				__GFP_WAIT|GFP_ATOMIC, false);
+		blk_mq_set_alloc_data(&alloc_data, q,
+				__GFP_WAIT|GFP_ATOMIC, false, ctx, hctx);
+		rq = __blk_mq_alloc_request(&alloc_data, rw);
+		ctx = alloc_data.ctx;
+		hctx = alloc_data.hctx;
 	}
 
 	hctx->queued++;
@@ -1288,6 +1281,8 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
 		return;
 
 	rq = blk_mq_map_request(q, bio, &data);
+	if (unlikely(!rq))
+		return;
 
 	if (unlikely(is_flush_fua)) {
 		blk_mq_bio_to_request(rq, bio);
@@ -1562,6 +1557,8 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
 		if (i == nr_queue)
 			break;
 
+		blk_mq_tag_idle(hctx);
+
 		if (set->ops->exit_hctx)
 			set->ops->exit_hctx(hctx, i);
 
@@ -1779,7 +1776,7 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 {
 	struct blk_mq_hw_ctx **hctxs;
-	struct blk_mq_ctx *ctx;
+	struct blk_mq_ctx __percpu *ctx;
 	struct request_queue *q;
 	unsigned int *map;
 	int i;