@@ -1861,7 +1861,6 @@ static void blk_mq_map_swqueue(struct request_queue *q,
 		hctx->tags = set->tags[i];
 		WARN_ON(!hctx->tags);
 
-		cpumask_copy(hctx->tags->cpumask, hctx->cpumask);
 		/*
 		 * Set the map size to the number of mapped software queues.
 		 * This is more accurate and more efficient than looping
@@ -2272,11 +2271,29 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 	return 0;
 }
 
-struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags)
+static int blk_mq_create_mq_map(struct blk_mq_tag_set *set,
+		const struct cpumask *affinity_mask)
 {
-	return tags->cpumask;
+	int queue = -1, cpu = 0;
+
+	set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
+			GFP_KERNEL, set->numa_node);
+	if (!set->mq_map)
+		return -ENOMEM;
+
+	if (!affinity_mask)
+		return 0;	/* map all cpus to queue 0 */
+
+	/* If cpus are offline, map them to first hctx */
+	for_each_online_cpu(cpu) {
+		if (cpumask_test_cpu(cpu, affinity_mask))
+			queue++;
+		if (queue >= 0)
+			set->mq_map[cpu] = queue;
+	}
+
+	return 0;
 }
-EXPORT_SYMBOL_GPL(blk_mq_tags_cpumask);
 
 /*
  * Alloc a tag set to be associated with one or more request queues.
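To make the new mapping rule concrete, below is a minimal user-space sketch (not part of the patch) of the loop in blk_mq_create_mq_map(): CPUs are walked in order, a new hardware queue is started whenever a CPU appears in the affinity mask, and each CPU is assigned to the most recently started queue; CPUs before the first mask hit, and offline CPUs, stay on queue 0 because the map is zero-initialized. The CPU count and the mask contents below are made-up values chosen only for illustration.

/* Stand-alone sketch; NR_CPUS and the mask are hypothetical. */
#include <stdio.h>

#define NR_CPUS	8			/* the kernel uses nr_cpu_ids */

int main(void)
{
	/* hypothetical affinity mask: bits set for CPUs 0, 2, 4, 6 */
	int affinity_mask[NR_CPUS] = { 1, 0, 1, 0, 1, 0, 1, 0 };
	int mq_map[NR_CPUS] = { 0 };	/* stands in for kzalloc_node() */
	int queue = -1, cpu;

	/* every CPU is treated as online in this sketch */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (affinity_mask[cpu])
			queue++;
		if (queue >= 0)
			mq_map[cpu] = queue;
	}

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d -> hctx %d\n", cpu, mq_map[cpu]);
	return 0;
}

With this mask the sketch prints cpu 0/1 -> hctx 0, cpu 2/3 -> hctx 1, cpu 4/5 -> hctx 2 and cpu 6/7 -> hctx 3, i.e. each queue serves the CPU it is affine to plus the unmasked CPUs that follow it.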