@@ -8496,8 +8496,9 @@ void free_fair_sched_group(struct task_group *tg)
 
 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 {
-	struct cfs_rq *cfs_rq;
 	struct sched_entity *se;
+	struct cfs_rq *cfs_rq;
+	struct rq *rq;
 	int i;
 
 	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
@@ -8512,6 +8513,8 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 	init_cfs_bandwidth(tg_cfs_bandwidth(tg));
 
 	for_each_possible_cpu(i) {
+		rq = cpu_rq(i);
+
 		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
 				      GFP_KERNEL, cpu_to_node(i));
 		if (!cfs_rq)
@@ -8525,7 +8528,10 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 		init_cfs_rq(cfs_rq);
 		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
 		init_entity_runnable_average(se);
+
+		raw_spin_lock_irq(&rq->lock);
 		post_init_entity_util_avg(se);
+		raw_spin_unlock_irq(&rq->lock);
 	}
 
 	return 1;