@@ -7112,9 +7112,6 @@ void __init sched_init(void)
 #endif
 #ifdef CONFIG_RT_GROUP_SCHED
 	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
-#endif
-#ifdef CONFIG_CPUMASK_OFFSTACK
-	alloc_size += num_possible_cpus() * cpumask_size();
 #endif
 	if (alloc_size) {
 		ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
@@ -7135,13 +7132,13 @@ void __init sched_init(void)
 		ptr += nr_cpu_ids * sizeof(void **);
 
 #endif /* CONFIG_RT_GROUP_SCHED */
+	}
 #ifdef CONFIG_CPUMASK_OFFSTACK
-		for_each_possible_cpu(i) {
-			per_cpu(load_balance_mask, i) = (void *)ptr;
-			ptr += cpumask_size();
-		}
-#endif /* CONFIG_CPUMASK_OFFSTACK */
+	for_each_possible_cpu(i) {
+		per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
+			cpumask_size(), GFP_KERNEL, cpu_to_node(i));
 	}
+#endif /* CONFIG_CPUMASK_OFFSTACK */
 
 	init_rt_bandwidth(&def_rt_bandwidth,
 			global_rt_period(), global_rt_runtime());
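
For reference, a minimal standalone sketch of what the CONFIG_CPUMASK_OFFSTACK path looks like after this change: instead of carving each CPU's load_balance_mask out of the single GFP_NOWAIT blob sized in the first hunk, each mask becomes its own kzalloc_node() allocation placed on that CPU's NUMA node via cpu_to_node(). The loop body is taken from the hunks above; the wrapper (the helper name alloc_load_balance_masks(), the includes, and the local DEFINE_PER_CPU) is hypothetical scaffolding for illustration — in the real tree load_balance_mask is defined in the scheduler code and the loop runs directly in sched_init().

    /*
     * Sketch only, kernel context assumed; alloc_load_balance_masks()
     * is a hypothetical helper, not a function in the tree.
     */
    #include <linux/cpumask.h>
    #include <linux/percpu.h>
    #include <linux/slab.h>

    static DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);

    static void __init alloc_load_balance_masks(void)
    {
    	int i;

    	for_each_possible_cpu(i) {
    		/* node-local allocation for each CPU's scratch mask */
    		per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
    				cpumask_size(), GFP_KERNEL, cpu_to_node(i));
    	}
    }

The trade-off, as visible in the diff itself: one small allocation per possible CPU instead of a single bulk buffer, in exchange for NUMA-local masks and an if (alloc_size) block that no longer has to account for cpumasks.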