@@ -76,8 +76,14 @@ struct cpuset {
 	struct cgroup_subsys_state css;
 
 	unsigned long flags;		/* "unsigned long" so bitops work */
-	cpumask_var_t cpus_allowed;	/* CPUs allowed to tasks in cpuset */
-	nodemask_t mems_allowed;	/* Memory Nodes allowed to tasks */
+
+	/* user-configured CPUs and Memory Nodes allow to tasks */
+	cpumask_var_t cpus_allowed;
+	nodemask_t mems_allowed;
+
+	/* effective CPUs and Memory Nodes allow to tasks */
+	cpumask_var_t effective_cpus;
+	nodemask_t effective_mems;
 
 	/*
 	 * This is old Memory Nodes tasks took on.
@@ -376,13 +382,20 @@ static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
 	if (!trial)
 		return NULL;
 
-	if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL)) {
-		kfree(trial);
-		return NULL;
-	}
-	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
+	if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL))
+		goto free_cs;
+	if (!alloc_cpumask_var(&trial->effective_cpus, GFP_KERNEL))
+		goto free_cpus;
 
+	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
+	cpumask_copy(trial->effective_cpus, cs->effective_cpus);
 	return trial;
+
+free_cpus:
+	free_cpumask_var(trial->cpus_allowed);
+free_cs:
+	kfree(trial);
+	return NULL;
 }
 
 /**
@@ -391,6 +404,7 @@ static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
  */
 static void free_trial_cpuset(struct cpuset *trial)
 {
+	free_cpumask_var(trial->effective_cpus);
 	free_cpumask_var(trial->cpus_allowed);
 	kfree(trial);
 }
@@ -1848,18 +1862,26 @@ cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
 	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
 	if (!cs)
 		return ERR_PTR(-ENOMEM);
-	if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) {
-		kfree(cs);
-		return ERR_PTR(-ENOMEM);
-	}
+	if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL))
+		goto free_cs;
+	if (!alloc_cpumask_var(&cs->effective_cpus, GFP_KERNEL))
+		goto free_cpus;
 
 	set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
 	cpumask_clear(cs->cpus_allowed);
 	nodes_clear(cs->mems_allowed);
+	cpumask_clear(cs->effective_cpus);
+	nodes_clear(cs->effective_mems);
 	fmeter_init(&cs->fmeter);
 	cs->relax_domain_level = -1;
 
 	return &cs->css;
+
+free_cpus:
+	free_cpumask_var(cs->cpus_allowed);
+free_cs:
+	kfree(cs);
+	return ERR_PTR(-ENOMEM);
 }
 
 static int cpuset_css_online(struct cgroup_subsys_state *css)
@@ -1882,6 +1904,13 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
 
 	cpuset_inc();
 
+	mutex_lock(&callback_mutex);
+	if (cgroup_on_dfl(cs->css.cgroup)) {
+		cpumask_copy(cs->effective_cpus, parent->effective_cpus);
+		cs->effective_mems = parent->effective_mems;
+	}
+	mutex_unlock(&callback_mutex);
+
 	if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
 		goto out_unlock;
 
@@ -1941,6 +1970,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
 {
 	struct cpuset *cs = css_cs(css);
 
+	free_cpumask_var(cs->effective_cpus);
 	free_cpumask_var(cs->cpus_allowed);
 	kfree(cs);
 }
@@ -1969,9 +1999,13 @@ int __init cpuset_init(void)
 
 	if (!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL))
 		BUG();
+	if (!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL))
+		BUG();
 
 	cpumask_setall(top_cpuset.cpus_allowed);
 	nodes_setall(top_cpuset.mems_allowed);
+	cpumask_setall(top_cpuset.effective_cpus);
+	nodes_setall(top_cpuset.effective_mems);
 
 	fmeter_init(&top_cpuset.fmeter);
 	set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
@@ -2207,6 +2241,9 @@ void __init cpuset_init_smp(void)
 	top_cpuset.mems_allowed = node_states[N_MEMORY];
 	top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
 
+	cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
+	top_cpuset.effective_mems = node_states[N_MEMORY];
+
 	register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
 }
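
As a rough mental model of how the user-configured masks and the new effective masks are meant to interact, here is a minimal, self-contained C sketch. It stands plain unsigned long bitmasks in for cpumask_var_t, and it assumes an intersect-with-parent rule that falls back to the parent's effective mask when the intersection is empty; the toy_* names and that rule are illustrative assumptions, not code from this patch (the hunks above only allocate, initialise, copy and free the new fields, and cpuset_css_online() merely seeds them from the parent on the default hierarchy).

/*
 * Illustrative sketch only -- not part of the patch above.  Models one
 * plausible relationship between a cpuset's user-configured CPU mask and
 * its effective CPU mask using plain bitmasks.  All names are hypothetical.
 */
#include <stdio.h>

struct toy_cpuset {
	unsigned long cpus_allowed;	/* user-configured CPUs */
	unsigned long effective_cpus;	/* CPUs actually granted to tasks */
};

static void toy_update_effective_cpus(struct toy_cpuset *cs,
				      const struct toy_cpuset *parent)
{
	unsigned long eff = cs->cpus_allowed & parent->effective_cpus;

	/* assumed fallback: inherit the parent's effective mask if empty */
	cs->effective_cpus = eff ? eff : parent->effective_cpus;
}

int main(void)
{
	struct toy_cpuset root  = { .cpus_allowed = 0xffUL, .effective_cpus = 0x0fUL };
	struct toy_cpuset child = { .cpus_allowed = 0x3cUL, .effective_cpus = 0 };

	toy_update_effective_cpus(&child, &root);

	/* configured 0x3c intersected with parent effective 0x0f -> 0x0c */
	printf("child effective_cpus = 0x%02lx\n", child.effective_cpus);
	return 0;
}

Compiled and run, the sketch prints child effective_cpus = 0x0c: the child keeps only those requested CPUs that its parent can actually offer, which is the distinction the separate cpus_allowed/effective_cpus (and mems_allowed/effective_mems) fields are there to carry.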