@@ -8624,10 +8624,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 		init_cfs_rq(cfs_rq);
 		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
 		init_entity_runnable_average(se);
-
-		raw_spin_lock_irq(&rq->lock);
-		post_init_entity_util_avg(se);
-		raw_spin_unlock_irq(&rq->lock);
 	}
 
 	return 1;
@@ -8638,6 +8634,22 @@ err:
 	return 0;
 }
 
+void online_fair_sched_group(struct task_group *tg)
+{
+	struct sched_entity *se;
+	struct rq *rq;
+	int i;
+
+	for_each_possible_cpu(i) {
+		rq = cpu_rq(i);
+		se = tg->se[i];
+
+		raw_spin_lock_irq(&rq->lock);
+		post_init_entity_util_avg(se);
+		raw_spin_unlock_irq(&rq->lock);
+	}
+}
+
 void unregister_fair_sched_group(struct task_group *tg)
 {
 	unsigned long flags;
@@ -8742,6 +8754,8 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 	return 1;
 }
 
+void online_fair_sched_group(struct task_group *tg) { }
+
 void unregister_fair_sched_group(struct task_group *tg) { }
 
 #endif /* CONFIG_FAIR_GROUP_SCHED */