@@ -440,7 +440,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 		blk_queue_exit(q);
 		return ERR_PTR(-EXDEV);
 	}
-	cpu = cpumask_first(alloc_data.hctx->cpumask);
+	cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask);
 	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
 
 	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
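
For context on the helper swap above: cpumask_first(mask) returns the lowest-numbered CPU set in mask, while cpumask_first_and(a, b) returns the lowest CPU set in both masks, i.e. the first bit of a & b, or nr_cpu_ids when the intersection is empty. Since hctx->cpumask may now contain CPUs that are offline, the allocation path has to intersect with cpu_online_mask before picking a ctx. A minimal userspace model of that semantic (first_and and NR_CPU_IDS are illustrative stand-ins, not kernel API; masks are reduced to single words):

#include <stdio.h>

/* Illustrative stand-ins, not kernel API: one word, one bit per CPU. */
#define NR_CPU_IDS 64

/* Lowest CPU set in both masks, or NR_CPU_IDS if the intersection is empty. */
static unsigned int first_and(unsigned long a, unsigned long b)
{
	unsigned long both = a & b;
	return both ? (unsigned int)__builtin_ctzl(both) : NR_CPU_IDS;
}

int main(void)
{
	unsigned long hctx_mask = 0x0cUL;  /* hctx serves CPUs 2 and 3 */
	unsigned long online    = 0x09UL;  /* CPUs 0 and 3 are online  */

	/* CPU 2 is mapped but offline, so CPU 3 is chosen. */
	printf("cpu = %u\n", first_and(hctx_mask, online));
	return 0;
}
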
@@ -1324,9 +1324,10 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 	if (--hctx->next_cpu_batch <= 0) {
 		int next_cpu;
 
-		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
+		next_cpu = cpumask_next_and(hctx->next_cpu, hctx->cpumask,
+				cpu_online_mask);
 		if (next_cpu >= nr_cpu_ids)
-			next_cpu = cpumask_first(hctx->cpumask);
+			next_cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
 
 		hctx->next_cpu = next_cpu;
 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
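
blk_mq_hctx_next_cpu() round-robins the service CPU for a hardware queue in batches of BLK_MQ_CPU_WORK_BATCH; the change makes both the forward step and the wrap-around intersect with cpu_online_mask so an offline CPU is never chosen. A rough userspace sketch of the wrap logic, under the same single-word-mask simplification as above (next_and is a hypothetical stand-in for cpumask_next_and):

#include <stdio.h>

#define NR_CPU_IDS 64

/* Hypothetical stand-in for cpumask_next_and(): lowest CPU greater
 * than start that is set in both masks, or NR_CPU_IDS if none. */
static unsigned int next_and(unsigned int start, unsigned long a, unsigned long b)
{
	unsigned long both = (a & b) & ~((2UL << start) - 1);
	return both ? (unsigned int)__builtin_ctzl(both) : NR_CPU_IDS;
}

int main(void)
{
	unsigned long hctx_mask = 0x0fUL;  /* hctx serves CPUs 0-3     */
	unsigned long online    = 0x05UL;  /* only CPUs 0 and 2 online */
	unsigned int cpu = 0;

	/* Offline CPUs 1 and 3 are skipped; past the end of the mask,
	 * the cursor wraps to the first online CPU, mirroring the wrap
	 * in blk_mq_hctx_next_cpu(). Prints 2, 0, 2, 0. */
	for (int i = 0; i < 4; i++) {
		cpu = next_and(cpu, hctx_mask, online);
		if (cpu >= NR_CPU_IDS)
			cpu = (unsigned int)__builtin_ctzl(hctx_mask & online);
		printf("next_cpu = %u\n", cpu);
	}
	return 0;
}
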
@@ -2220,16 +2221,11 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
 		INIT_LIST_HEAD(&__ctx->rq_list);
 		__ctx->queue = q;
 
-		/* If the cpu isn't present, the cpu is mapped to first hctx */
-		if (!cpu_present(i))
-			continue;
-
-		hctx = blk_mq_map_queue(q, i);
-
 		/*
 		 * Set local node, IFF we have more than one hw queue. If
 		 * not, we remain on the home node of the device
 		 */
+		hctx = blk_mq_map_queue(q, i);
 		if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
 			hctx->numa_node = local_memory_node(cpu_to_node(i));
 	}
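
This hunk relies on the kernel's CPU-mask hierarchy: cpu_online_mask is a subset of cpu_present_mask, which is a subset of cpu_possible_mask, and cpu_possible_mask is fixed at boot. By dropping the cpu_present(i) early-out here (and by switching to for_each_possible_cpu() in the next hunk), every possible CPU gets its ctx initialized and mapped up front, so a CPU hotplugged later never hits an uninitialized mapping. A toy check of the containment invariant, with made-up masks for a hypothetical 8-CPU box:

#include <assert.h>

int main(void)
{
	/* Made-up masks: CPUs 4-7 are hot-pluggable sockets with nothing
	 * plugged in, and CPU 3 is present but currently offline. */
	unsigned long possible = 0xffUL;  /* fixed for the life of the boot */
	unsigned long present  = 0x0fUL;
	unsigned long online   = 0x07UL;

	/* The invariant the patch leans on: online is a subset of present,
	 * which is a subset of possible, so state initialized for every
	 * possible CPU is always there when a CPU comes online. */
	assert((online & ~present) == 0);
	assert((present & ~possible) == 0);
	return 0;
}
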
@@ -2286,7 +2282,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 	 *
 	 * If the cpu isn't present, the cpu is mapped to first hctx.
 	 */
-	for_each_present_cpu(i) {
+	for_each_possible_cpu(i) {
 		hctx_idx = q->mq_map[i];
 		/* unmapped hw queue can be remapped after CPU topo changed */
 		if (!set->tags[hctx_idx] &&
@@ -2340,7 +2336,8 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 		/*
 		 * Initialize batch roundrobin counts
 		 */
-		hctx->next_cpu = cpumask_first(hctx->cpumask);
+		hctx->next_cpu = cpumask_first_and(hctx->cpumask,
+				cpu_online_mask);
 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
 	}
 }
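
One edge case worth noting with the new initialization: cpumask_first_and() returns nr_cpu_ids, an out-of-range sentinel, when no CPU in hctx->cpumask is currently online, so callers must treat any value >= nr_cpu_ids as "no CPU" rather than indexing with it (the nr_cpu_ids comparison in the blk_mq_hctx_next_cpu() hunk above is exactly that check). A small illustration of the sentinel convention, reusing the hypothetical first_and model from the first sketch:

#include <stdio.h>

#define NR_CPU_IDS 64

/* Same illustrative stand-in as above, not kernel API. */
static unsigned int first_and(unsigned long a, unsigned long b)
{
	unsigned long both = a & b;
	return both ? (unsigned int)__builtin_ctzl(both) : NR_CPU_IDS;
}

int main(void)
{
	/* Every CPU this hctx serves (4 and 5) is offline, so the helper
	 * yields the out-of-range sentinel instead of a CPU number. */
	unsigned int cpu = first_and(0x30UL, 0x03UL);

	if (cpu >= NR_CPU_IDS)
		printf("no online CPU for this hctx\n");
	else
		printf("cpu = %u\n", cpu);
	return 0;
}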