@@ -8234,11 +8234,8 @@ void free_fair_sched_group(struct task_group *tg)
 	for_each_possible_cpu(i) {
 		if (tg->cfs_rq)
 			kfree(tg->cfs_rq[i]);
-		if (tg->se) {
-			if (tg->se[i])
-				remove_entity_load_avg(tg->se[i]);
-
+		if (tg->se)
 			kfree(tg->se[i]);
-		}
 	}
 
 	kfree(tg->cfs_rq);
@@ -8286,21 +8283,29 @@ err:
 	return 0;
 }
 
-void unregister_fair_sched_group(struct task_group *tg, int cpu)
+void unregister_fair_sched_group(struct task_group *tg)
 {
-	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
+	struct rq *rq;
+	int cpu;
 
-	/*
-	 * Only empty task groups can be destroyed; so we can speculatively
-	 * check on_list without danger of it being re-added.
-	 */
-	if (!tg->cfs_rq[cpu]->on_list)
-		return;
+	for_each_possible_cpu(cpu) {
+		if (tg->se[cpu])
+			remove_entity_load_avg(tg->se[cpu]);
 
-	raw_spin_lock_irqsave(&rq->lock, flags);
-	list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
+		/*
+		 * Only empty task groups can be destroyed; so we can speculatively
+		 * check on_list without danger of it being re-added.
+		 */
+		if (!tg->cfs_rq[cpu]->on_list)
+			continue;
+
+		rq = cpu_rq(cpu);
+
+		raw_spin_lock_irqsave(&rq->lock, flags);
+		list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
+		raw_spin_unlock_irqrestore(&rq->lock, flags);
+	}
 }
 
 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
@@ -8382,7 +8387,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 	return 1;
 }
 
-void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
+void unregister_fair_sched_group(struct task_group *tg) { }
 
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 