@@ -31,14 +31,16 @@ static int get_first_sibling(unsigned int cpu)
 	return cpu;
 }
 
-int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
-			    const struct cpumask *online_mask)
+int blk_mq_map_queues(struct blk_mq_tag_set *set)
 {
+	unsigned int *map = set->mq_map;
+	unsigned int nr_queues = set->nr_hw_queues;
+	const struct cpumask *online_mask = cpu_online_mask;
 	unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
 	cpumask_var_t cpus;
 
 	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC))
-		return 1;
+		return -ENOMEM;
 
 	cpumask_clear(cpus);
 	nr_cpus = nr_uniq_cpus = 0;
@@ -86,23 +88,6 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
 	return 0;
 }
 
-unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set)
-{
-	unsigned int *map;
-
-	/* If cpus are offline, map them to first hctx */
-	map = kzalloc_node(sizeof(*map) * nr_cpu_ids, GFP_KERNEL,
-			   set->numa_node);
-	if (!map)
-		return NULL;
-
-	if (!blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask))
-		return map;
-
-	kfree(map);
-	return NULL;
-}
-
 /*
  * We have no quick way of doing reverse lookups. This is only used at
  * queue init time, so runtime isn't important.