@@ -7370,6 +7370,9 @@ int in_sched_functions(unsigned long addr)
  */
 struct task_group root_task_group;
 LIST_HEAD(task_groups);
+
+/* Cacheline aligned slab cache for task_group */
+static struct kmem_cache *task_group_cache __read_mostly;
 #endif
 
 DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
@@ -7427,11 +7430,12 @@ void __init sched_init(void)
 #endif /* CONFIG_RT_GROUP_SCHED */
 
 #ifdef CONFIG_CGROUP_SCHED
+	task_group_cache = KMEM_CACHE(task_group, 0);
+
 	list_add(&root_task_group.list, &task_groups);
 	INIT_LIST_HEAD(&root_task_group.children);
 	INIT_LIST_HEAD(&root_task_group.siblings);
 	autogroup_init(&init_task);
-
 #endif /* CONFIG_CGROUP_SCHED */
 
 	for_each_possible_cpu(i) {
@@ -7712,7 +7716,7 @@ static void free_sched_group(struct task_group *tg)
 	free_fair_sched_group(tg);
 	free_rt_sched_group(tg);
 	autogroup_free(tg);
-	kfree(tg);
+	kmem_cache_free(task_group_cache, tg);
 }
 
 /* allocate runqueue etc for a new task group */
@@ -7720,7 +7724,7 @@ struct task_group *sched_create_group(struct task_group *parent)
 {
 	struct task_group *tg;
 
-	tg = kzalloc(sizeof(*tg), GFP_KERNEL);
+	tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
 	if (!tg)
 		return ERR_PTR(-ENOMEM);
 
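
For context, here is a minimal, self-contained sketch of the slab-cache pattern this patch switches to: KMEM_CACHE() at init time, then kmem_cache_alloc()/kmem_cache_free() in place of kzalloc()/kfree(). It is an illustrative kernel-module example, not part of the patch; struct demo_obj, demo_cache and the module boilerplate are made-up names for demonstration.

/*
 * Illustrative sketch only -- not sched/core.c. Demonstrates the
 * KMEM_CACHE()/kmem_cache_alloc()/kmem_cache_free() pattern that the
 * patch applies to struct task_group, using a made-up struct demo_obj.
 */
#include <linux/module.h>
#include <linux/slab.h>

struct demo_obj {
	int id;
	char name[16];
};

/* Dedicated slab cache for all demo_obj allocations */
static struct kmem_cache *demo_cache __read_mostly;

static int __init demo_init(void)
{
	struct demo_obj *obj;

	/* KMEM_CACHE() derives name, size and alignment from the struct type */
	demo_cache = KMEM_CACHE(demo_obj, 0);
	if (!demo_cache)
		return -ENOMEM;

	/* GFP_KERNEL | __GFP_ZERO gives kzalloc()-like zeroed memory */
	obj = kmem_cache_alloc(demo_cache, GFP_KERNEL | __GFP_ZERO);
	if (!obj) {
		kmem_cache_destroy(demo_cache);
		return -ENOMEM;
	}

	kmem_cache_free(demo_cache, obj);
	return 0;
}

static void __exit demo_exit(void)
{
	kmem_cache_destroy(demo_cache);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Note that sched_init() never tears task_group_cache down, since the cache is needed for the lifetime of the kernel; the kmem_cache_destroy() calls in the sketch only exist because a module can be unloaded.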