@@ -9,6 +9,7 @@
 #include <linux/backing-dev.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/kmemleak.h>
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -989,18 +990,25 @@ void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
 }
 EXPORT_SYMBOL(blk_mq_delay_queue);
 
-static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
-				    struct request *rq, bool at_head)
+static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
+					    struct blk_mq_ctx *ctx,
+					    struct request *rq,
+					    bool at_head)
 {
-	struct blk_mq_ctx *ctx = rq->mq_ctx;
-
 	trace_block_rq_insert(hctx->queue, rq);
 
 	if (at_head)
 		list_add(&rq->queuelist, &ctx->rq_list);
 	else
 		list_add_tail(&rq->queuelist, &ctx->rq_list);
+}
 
+static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
+				    struct request *rq, bool at_head)
+{
+	struct blk_mq_ctx *ctx = rq->mq_ctx;
+
+	__blk_mq_insert_req_list(hctx, ctx, rq, at_head);
 	blk_mq_hctx_mark_pending(hctx, ctx);
 }
 
@@ -1056,8 +1064,9 @@ static void blk_mq_insert_requests(struct request_queue *q,
 		rq = list_first_entry(list, struct request, queuelist);
 		list_del_init(&rq->queuelist);
 		rq->mq_ctx = ctx;
-		__blk_mq_insert_request(hctx, rq, false);
+		__blk_mq_insert_req_list(hctx, ctx, rq, false);
 	}
+	blk_mq_hctx_mark_pending(hctx, ctx);
 	spin_unlock(&ctx->lock);
 
 	blk_mq_run_hw_queue(hctx, from_schedule);
@@ -1139,7 +1148,7 @@ static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
 					 struct blk_mq_ctx *ctx,
 					 struct request *rq, struct bio *bio)
 {
-	if (!hctx_allow_merges(hctx)) {
+	if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
 		blk_mq_bio_to_request(rq, bio);
 		spin_lock(&ctx->lock);
 insert_rq:
@@ -1267,9 +1276,12 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 	blk_queue_split(q, &bio, q->bio_split);
 
-	if (!is_flush_fua && !blk_queue_nomerges(q) &&
-	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
-		return;
+	if (!is_flush_fua && !blk_queue_nomerges(q)) {
+		if (blk_attempt_plug_merge(q, bio, &request_count,
+					   &same_queue_rq))
+			return;
+	} else
+		request_count = blk_plug_queued_count(q);
 
 	rq = blk_mq_map_request(q, bio, &data);
 	if (unlikely(!rq))
@@ -1376,7 +1388,7 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
 	plug = current->plug;
 	if (plug) {
 		blk_mq_bio_to_request(rq, bio);
-		if (list_empty(&plug->mq_list))
+		if (!request_count)
 			trace_block_plug(q);
 		else if (request_count >= BLK_MAX_REQUEST_COUNT) {
 			blk_flush_plug_list(plug, false);
@@ -1430,6 +1442,11 @@ static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
 	while (!list_empty(&tags->page_list)) {
 		page = list_first_entry(&tags->page_list, struct page, lru);
 		list_del_init(&page->lru);
+		/*
+		 * Remove kmemleak object previously allocated in
+		 * blk_mq_init_rq_map().
+		 */
+		kmemleak_free(page_address(page));
 		__free_pages(page, page->private);
 	}
 
@@ -1502,6 +1519,11 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 		list_add_tail(&page->lru, &tags->page_list);
 
 		p = page_address(page);
+		/*
+		 * Allow kmemleak to scan these pages as they contain pointers
+		 * to additional allocations like via ops->init_request().
+		 */
+		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_KERNEL);
 		entries_per_page = order_to_size(this_order) / rq_size;
 		to_do = min(entries_per_page, set->queue_depth - i);
 		left -= to_do * rq_size;
@@ -1673,7 +1695,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	INIT_LIST_HEAD(&hctx->dispatch);
 	hctx->queue = q;
 	hctx->queue_num = hctx_idx;
-	hctx->flags = set->flags;
+	hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
 
 	blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
 					blk_mq_hctx_notify, hctx);
@@ -1860,27 +1882,26 @@ static void blk_mq_map_swqueue(struct request_queue *q,
 	}
 }
 
-static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
+static void queue_set_hctx_shared(struct request_queue *q, bool shared)
 {
 	struct blk_mq_hw_ctx *hctx;
-	struct request_queue *q;
-	bool shared;
 	int i;
 
-	if (set->tag_list.next == set->tag_list.prev)
-		shared = false;
-	else
-		shared = true;
+	queue_for_each_hw_ctx(q, hctx, i) {
+		if (shared)
+			hctx->flags |= BLK_MQ_F_TAG_SHARED;
+		else
+			hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
+	}
+}
+
+static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
+{
+	struct request_queue *q;
 
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
 		blk_mq_freeze_queue(q);
-
-		queue_for_each_hw_ctx(q, hctx, i) {
-			if (shared)
-				hctx->flags |= BLK_MQ_F_TAG_SHARED;
-			else
-				hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
-		}
+		queue_set_hctx_shared(q, shared);
 		blk_mq_unfreeze_queue(q);
 	}
 }
@@ -1891,7 +1912,12 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 
 	mutex_lock(&set->tag_list_lock);
 	list_del_init(&q->tag_set_list);
-	blk_mq_update_tag_set_depth(set);
+	if (list_is_singular(&set->tag_list)) {
+		/* just transitioned to unshared */
+		set->flags &= ~BLK_MQ_F_TAG_SHARED;
+		/* update existing queue */
+		blk_mq_update_tag_set_depth(set, false);
+	}
 	mutex_unlock(&set->tag_list_lock);
 }
 
@@ -1901,8 +1927,17 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
 	q->tag_set = set;
 
 	mutex_lock(&set->tag_list_lock);
+
+	/* Check to see if we're transitioning to shared (from 1 to 2 queues). */
+	if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
+		set->flags |= BLK_MQ_F_TAG_SHARED;
+		/* update existing queue */
+		blk_mq_update_tag_set_depth(set, true);
+	}
+	if (set->flags & BLK_MQ_F_TAG_SHARED)
+		queue_set_hctx_shared(q, true);
 	list_add_tail(&q->tag_set_list, &set->tag_list);
-	blk_mq_update_tag_set_depth(set);
+
 	mutex_unlock(&set->tag_list_lock);
 }
 