@@ -677,8 +677,11 @@ static void blk_mq_rq_timer(unsigned long priv)
 		data.next = blk_rq_timeout(round_jiffies_up(data.next));
 		mod_timer(&q->timeout, data.next);
 	} else {
-		queue_for_each_hw_ctx(q, hctx, i)
-			blk_mq_tag_idle(hctx);
+		queue_for_each_hw_ctx(q, hctx, i) {
+			/* the hctx may be unmapped, so check it here */
+			if (blk_mq_hw_queue_mapped(hctx))
+				blk_mq_tag_idle(hctx);
+		}
 	}
 }
 
@@ -855,6 +858,16 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 		spin_lock(&hctx->lock);
 		list_splice(&rq_list, &hctx->dispatch);
 		spin_unlock(&hctx->lock);
+		/*
+		 * the queue is expected stopped with BLK_MQ_RQ_QUEUE_BUSY, but
+		 * it's possible the queue is stopped and restarted again
+		 * before this. Queue restart will dispatch requests. And since
+		 * requests in rq_list aren't added into hctx->dispatch yet,
+		 * the requests in rq_list might get lost.
+		 *
+		 * blk_mq_run_hw_queue() already checks the STOPPED bit
+		 **/
+		blk_mq_run_hw_queue(hctx, true);
 	}
 }
 
@@ -1571,22 +1584,6 @@ static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
 	return NOTIFY_OK;
 }
 
-static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
-{
-	struct request_queue *q = hctx->queue;
-	struct blk_mq_tag_set *set = q->tag_set;
-
-	if (set->tags[hctx->queue_num])
-		return NOTIFY_OK;
-
-	set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
-	if (!set->tags[hctx->queue_num])
-		return NOTIFY_STOP;
-
-	hctx->tags = set->tags[hctx->queue_num];
-	return NOTIFY_OK;
-}
-
 static int blk_mq_hctx_notify(void *data, unsigned long action,
 			      unsigned int cpu)
 {
@@ -1594,8 +1591,11 @@ static int blk_mq_hctx_notify(void *data, unsigned long action,
 
 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
 		return blk_mq_hctx_cpu_offline(hctx, cpu);
-	else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
-		return blk_mq_hctx_cpu_online(hctx, cpu);
+
+	/*
+	 * In case of CPU online, tags may be reallocated
+	 * in blk_mq_map_swqueue() after mapping is updated.
+	 */
 
 	return NOTIFY_OK;
 }
@@ -1775,6 +1775,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 	unsigned int i;
 	struct blk_mq_hw_ctx *hctx;
 	struct blk_mq_ctx *ctx;
+	struct blk_mq_tag_set *set = q->tag_set;
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		cpumask_clear(hctx->cpumask);
@@ -1803,16 +1804,20 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 		 * disable it and free the request entries.
 		 */
 		if (!hctx->nr_ctx) {
-			struct blk_mq_tag_set *set = q->tag_set;
-
 			if (set->tags[i]) {
 				blk_mq_free_rq_map(set, set->tags[i], i);
 				set->tags[i] = NULL;
-				hctx->tags = NULL;
 			}
+			hctx->tags = NULL;
 			continue;
 		}
 
+		/* unmapped hw queue can be remapped after CPU topo changed */
+		if (!set->tags[i])
+			set->tags[i] = blk_mq_init_rq_map(set, i);
+		hctx->tags = set->tags[i];
+		WARN_ON(!hctx->tags);
+
 		/*
 		 * Set the map size to the number of mapped software queues.
 		 * This is more accurate and more efficient than looping
@@ -2090,9 +2095,16 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
 	 */
 	list_for_each_entry(q, &all_q_list, all_q_node)
 		blk_mq_freeze_queue_start(q);
-	list_for_each_entry(q, &all_q_list, all_q_node)
+	list_for_each_entry(q, &all_q_list, all_q_node) {
 		blk_mq_freeze_queue_wait(q);
 
+		/*
+		 * timeout handler can't touch hw queue during the
+		 * reinitialization
+		 */
+		del_timer_sync(&q->timeout);
+	}
+
 	list_for_each_entry(q, &all_q_list, all_q_node)
 		blk_mq_queue_reinit(q);
 