@@ -5371,6 +5371,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 
 	case CPU_UP_PREPARE:
 		rq->calc_load_update = calc_load_update;
+		account_reset_rq(rq);
 		break;
 
 	case CPU_ONLINE:
@@ -7537,7 +7538,7 @@ void set_curr_task(int cpu, struct task_struct *p)
 /* task_group_lock serializes the addition/removal of task groups */
 static DEFINE_SPINLOCK(task_group_lock);
 
-static void free_sched_group(struct task_group *tg)
+static void sched_free_group(struct task_group *tg)
 {
 	free_fair_sched_group(tg);
 	free_rt_sched_group(tg);
@@ -7563,7 +7564,7 @@ struct task_group *sched_create_group(struct task_group *parent)
 	return tg;
 
 err:
-	free_sched_group(tg);
+	sched_free_group(tg);
 	return ERR_PTR(-ENOMEM);
 }
 
@@ -7583,17 +7584,16 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
 }
 
 /* rcu callback to free various structures associated with a task group */
-static void free_sched_group_rcu(struct rcu_head *rhp)
+static void sched_free_group_rcu(struct rcu_head *rhp)
 {
 	/* now it should be safe to free those cfs_rqs */
-	free_sched_group(container_of(rhp, struct task_group, rcu));
+	sched_free_group(container_of(rhp, struct task_group, rcu));
 }
 
-/* Destroy runqueue etc associated with a task group */
 void sched_destroy_group(struct task_group *tg)
 {
 	/* wait for possible concurrent references to cfs_rqs complete */
-	call_rcu(&tg->rcu, free_sched_group_rcu);
+	call_rcu(&tg->rcu, sched_free_group_rcu);
 }
 
 void sched_offline_group(struct task_group *tg)
@@ -8052,31 +8052,26 @@ cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 	if (IS_ERR(tg))
 		return ERR_PTR(-ENOMEM);
 
+	sched_online_group(tg, parent);
+
 	return &tg->css;
 }
 
-static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
+static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
 {
 	struct task_group *tg = css_tg(css);
-	struct task_group *parent = css_tg(css->parent);
 
-	if (parent)
-		sched_online_group(tg, parent);
-	return 0;
+	sched_offline_group(tg);
 }
 
 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
 {
 	struct task_group *tg = css_tg(css);
 
-	sched_destroy_group(tg);
-}
-
-static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
-{
-	struct task_group *tg = css_tg(css);
-
-	sched_offline_group(tg);
+	/*
+	 * Relies on the RCU grace period between css_released() and this.
+	 */
+	sched_free_group(tg);
 }
 
 static void cpu_cgroup_fork(struct task_struct *task)
@@ -8436,9 +8431,8 @@ static struct cftype cpu_files[] = {
 
 struct cgroup_subsys cpu_cgrp_subsys = {
 	.css_alloc	= cpu_cgroup_css_alloc,
+	.css_released	= cpu_cgroup_css_released,
 	.css_free	= cpu_cgroup_css_free,
-	.css_online	= cpu_cgroup_css_online,
-	.css_offline	= cpu_cgroup_css_offline,
 	.fork		= cpu_cgroup_fork,
 	.can_attach	= cpu_cgroup_can_attach,
 	.attach		= cpu_cgroup_attach,