@@ -112,18 +112,22 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
  */
 void blk_mq_freeze_queue(struct request_queue *q)
 {
+	bool freeze;
+
 	spin_lock_irq(q->queue_lock);
-	q->mq_freeze_depth++;
+	freeze = !q->mq_freeze_depth++;
 	spin_unlock_irq(q->queue_lock);
 
-	percpu_ref_kill(&q->mq_usage_counter);
-	blk_mq_run_queues(q, false);
+	if (freeze) {
+		percpu_ref_kill(&q->mq_usage_counter);
+		blk_mq_run_queues(q, false);
+	}
 	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
 }
 
 static void blk_mq_unfreeze_queue(struct request_queue *q)
 {
-	bool wake = false;
+	bool wake;
 
 	spin_lock_irq(q->queue_lock);
 	wake = !--q->mq_freeze_depth;
@@ -172,6 +176,8 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	/* tag was already set */
 	rq->errors = 0;
 
+	rq->cmd = rq->__cmd;
+
 	rq->extra_len = 0;
 	rq->sense_len = 0;
 	rq->resid_len = 0;
@@ -1068,13 +1074,17 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
 	blk_account_io_start(rq, 1);
 }
 
+static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
+{
+	return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
+		!blk_queue_nomerges(hctx->queue);
+}
+
 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
 					 struct blk_mq_ctx *ctx,
 					 struct request *rq, struct bio *bio)
 {
-	struct request_queue *q = hctx->queue;
-
-	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE)) {
+	if (!hctx_allow_merges(hctx)) {
 		blk_mq_bio_to_request(rq, bio);
 		spin_lock(&ctx->lock);
 insert_rq:
@@ -1082,6 +1092,8 @@ static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
 		spin_unlock(&ctx->lock);
 		return false;
 	} else {
+		struct request_queue *q = hctx->queue;
+
 		spin_lock(&ctx->lock);
 		if (!blk_mq_attempt_merge(q, ctx, bio)) {
 			blk_mq_bio_to_request(rq, bio);
@@ -1574,7 +1586,7 @@ static int blk_mq_init_hw_queues(struct request_queue *q,
 		hctx->tags = set->tags[i];
 
 		/*
-		 * Allocate space for all possible cpus to avoid allocation in
+		 * Allocate space for all possible cpus to avoid allocation at
 		 * runtime
 		 */
 		hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
@@ -1662,8 +1674,8 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		/*
-		 * If not software queues are mapped to this hardware queue,
-		 * disable it and free the request entries
+		 * If no software queues are mapped to this hardware queue,
+		 * disable it and free the request entries.
 		 */
 		if (!hctx->nr_ctx) {
 			struct blk_mq_tag_set *set = q->tag_set;
@@ -1713,14 +1725,10 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 {
 	struct blk_mq_tag_set *set = q->tag_set;
 
-	blk_mq_freeze_queue(q);
-
 	mutex_lock(&set->tag_list_lock);
 	list_del_init(&q->tag_set_list);
 	blk_mq_update_tag_set_depth(set);
 	mutex_unlock(&set->tag_list_lock);
-
-	blk_mq_unfreeze_queue(q);
 }
 
 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
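
Note on the first hunk: the new freeze depth makes nested freezes safe by acting
only on the edge transitions, i.e. percpu_ref_kill() runs only when the depth
goes 0 -> 1, and (by symmetry in the unfreeze path) the revive-and-wake step
runs only when it returns to 0. The standalone userspace sketch below is not
kernel code and only illustrates that counting pattern: struct fake_queue, the
fake_* functions, the pthread mutex/condvar standing in for q->queue_lock and
q->mq_freeze_wq, the plain integer standing in for the mq_usage_counter
percpu_ref, and the revive step in the unfreeze side are all assumptions made
for the example.

/* Userspace sketch only -- not kernel code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_queue {
	pthread_mutex_t lock;		/* stands in for q->queue_lock */
	pthread_cond_t freeze_wq;	/* stands in for q->mq_freeze_wq */
	int freeze_depth;		/* stands in for q->mq_freeze_depth */
	int usage;			/* stands in for the percpu_ref count */
	bool killed;			/* "killed" state of the usage counter */
};

static void fake_freeze_queue(struct fake_queue *q)
{
	bool freeze;

	pthread_mutex_lock(&q->lock);
	freeze = !q->freeze_depth++;	/* true only for the outermost freeze */
	if (freeze)
		q->killed = true;	/* analogue of percpu_ref_kill() */
	while (q->usage > 0)		/* every freezer waits for users to drain */
		pthread_cond_wait(&q->freeze_wq, &q->lock);
	pthread_mutex_unlock(&q->lock);
}

static void fake_unfreeze_queue(struct fake_queue *q)
{
	bool wake;

	pthread_mutex_lock(&q->lock);
	wake = !--q->freeze_depth;	/* true only for the last unfreeze */
	if (wake) {
		q->killed = false;	/* assumed revive step on the last unfreeze */
		pthread_cond_broadcast(&q->freeze_wq);
	}
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct fake_queue q = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.freeze_wq = PTHREAD_COND_INITIALIZER,
	};

	/* Nested freeze/unfreeze: only the first kills, only the last revives. */
	fake_freeze_queue(&q);
	fake_freeze_queue(&q);
	fake_unfreeze_queue(&q);
	fake_unfreeze_queue(&q);
	printf("freeze_depth=%d killed=%d\n", q.freeze_depth, (int)q.killed);
	return 0;
}

Built with a C compiler and -pthread, the nested pair above ends with
freeze_depth back at 0 and the counter revived, which is the invariant the new
"freeze" and "wake" booleans in the hunk are guarding.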