@@ -77,6 +77,25 @@ struct cpuset {
 
 	unsigned long flags;		/* "unsigned long" so bitops work */
 
+	/*
+	 * On default hierarchy:
+	 *
+	 * The user-configured masks can only be changed by writing to
+	 * cpuset.cpus and cpuset.mems, and won't be limited by the
+	 * parent masks.
+	 *
+	 * The effective masks are the real masks that apply to the tasks
+	 * in the cpuset. They may be changed if the configured masks are
+	 * changed or hotplug happens.
+	 *
+	 * effective_mask == configured_mask & parent's effective_mask,
+	 * and if it ends up empty, it will inherit the parent's mask.
+	 *
+	 * On legacy hierarchy:
+	 *
+	 * The user-configured masks are always the same as the effective masks.
+	 */
+
 	/* user-configured CPUs and Memory Nodes allow to tasks */
 	cpumask_var_t cpus_allowed;
 	nodemask_t mems_allowed;
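The invariant stated in the new comment (effective = configured & parent's effective, falling back to the parent's mask when the intersection is empty) can be sanity-checked outside the kernel. Below is a minimal userspace sketch, not kernel code: cpumasks are modeled as plain unsigned long bitmaps (bit n = CPU n), and sim_cpuset and compute_effective() are illustrative names that do not appear in this patch.

#include <stdio.h>

/* Userspace model of a cpuset: masks are plain bitmaps, bit n = CPU n. */
struct sim_cpuset {
	unsigned long cpus_configured;	/* what the user wrote to cpuset.cpus */
	unsigned long cpus_effective;	/* what tasks actually run on */
};

/*
 * Model of the default-hierarchy rule from the comment above:
 * effective = configured & parent's effective, falling back to the
 * parent's effective mask when the intersection is empty.
 */
static unsigned long compute_effective(unsigned long configured,
				       unsigned long parent_effective)
{
	unsigned long effective = configured & parent_effective;

	return effective ? effective : parent_effective;
}

int main(void)
{
	struct sim_cpuset parent = { .cpus_configured = 0x0f, .cpus_effective = 0x0f };
	struct sim_cpuset child  = { .cpus_configured = 0x30 };	/* CPUs 4-5, outside parent */

	child.cpus_effective = compute_effective(child.cpus_configured,
						 parent.cpus_effective);

	/* The intersection is empty, so the child inherits the parent's mask. */
	printf("child effective: 0x%lx\n", child.cpus_effective);	/* prints 0xf */
	return 0;
}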
@@ -450,9 +470,9 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
 
 	par = parent_cs(cur);
 
-	/* We must be a subset of our parent cpuset */
+	/* On legacy hierarchy, we must be a subset of our parent cpuset. */
 	ret = -EACCES;
-	if (!is_cpuset_subset(trial, par))
+	if (!cgroup_on_dfl(cur->css.cgroup) && !is_cpuset_subset(trial, par))
 		goto out;
 
 	/*
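With the subset check gated on !cgroup_on_dfl(), a write to cpuset.cpus on the default hierarchy may legitimately contain CPUs the parent does not have; the clamping now happens when the effective mask is computed rather than at validation time. Here is a userspace sketch of the relaxed check, with on_default_hierarchy standing in for cgroup_on_dfl() and all helper names invented for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for cgroup_on_dfl() in the hunk above. */
static bool on_default_hierarchy;

static bool is_subset(unsigned long trial, unsigned long parent)
{
	return (trial & ~parent) == 0;	/* no bits outside the parent */
}

/* Shape of the relaxed validate_change() check: the subset
 * requirement now applies only on the legacy hierarchy. */
static int validate_cpus(unsigned long trial, unsigned long parent)
{
	if (!on_default_hierarchy && !is_subset(trial, parent))
		return -1;	/* the kernel returns -EACCES here */
	return 0;
}

int main(void)
{
	on_default_hierarchy = true;
	/* CPUs 4-5 lie outside the parent's 0-3, yet the write is accepted;
	 * the effective mask computed later clamps it to the parent. */
	printf("%d\n", validate_cpus(0x30, 0x0f));	/* 0 */

	on_default_hierarchy = false;
	printf("%d\n", validate_cpus(0x30, 0x0f));	/* -1, i.e. -EACCES */
	return 0;
}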
@@ -2167,6 +2187,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
 	static cpumask_t new_cpus;
 	static nodemask_t new_mems;
 	bool cpus_updated, mems_updated;
+	bool on_dfl = cgroup_on_dfl(top_cpuset.css.cgroup);
 
 	mutex_lock(&cpuset_mutex);
 
@@ -2174,13 +2195,14 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
 	cpumask_copy(&new_cpus, cpu_active_mask);
 	new_mems = node_states[N_MEMORY];
 
-	cpus_updated = !cpumask_equal(top_cpuset.cpus_allowed, &new_cpus);
-	mems_updated = !nodes_equal(top_cpuset.mems_allowed, new_mems);
+	cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus);
+	mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
 
 	/* synchronize cpus_allowed to cpu_active_mask */
 	if (cpus_updated) {
 		mutex_lock(&callback_mutex);
-		cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
+		if (!on_dfl)
+			cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
 		cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
 		mutex_unlock(&callback_mutex);
 		/* we don't mess with cpumasks of tasks in top_cpuset */
@@ -2189,7 +2211,8 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
 	/* synchronize mems_allowed to N_MEMORY */
 	if (mems_updated) {
 		mutex_lock(&callback_mutex);
-		top_cpuset.mems_allowed = new_mems;
+		if (!on_dfl)
+			top_cpuset.mems_allowed = new_mems;
 		top_cpuset.effective_mems = new_mems;
 		mutex_unlock(&callback_mutex);
 		update_tasks_nodemask(&top_cpuset);
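The net effect of the two gated writes is that on the default hierarchy a hotplug event only refreshes top_cpuset's effective masks, while the user-configured masks survive, so a CPU or node that goes offline and comes back is picked up again without user intervention. Below is a userspace sketch of that behavior; sim_top_cpuset and hotplug_update() are invented for illustration and masks are plain bitmaps:

#include <stdbool.h>
#include <stdio.h>

/* Userspace model of the top_cpuset hotplug update in the hunks above. */
struct sim_top_cpuset {
	unsigned long cpus_allowed;	/* user-configured mask */
	unsigned long cpus_effective;	/* mask hotplug actually applies */
};

static void hotplug_update(struct sim_top_cpuset *top,
			   unsigned long active, bool on_dfl)
{
	if (top->cpus_effective == active)
		return;		/* nothing changed, mirrors cpus_updated */

	/* The legacy hierarchy overwrites the configured mask too; the
	 * default hierarchy preserves it and refreshes only the
	 * effective mask. */
	if (!on_dfl)
		top->cpus_allowed = active;
	top->cpus_effective = active;
}

int main(void)
{
	struct sim_top_cpuset top = { .cpus_allowed = 0x0f, .cpus_effective = 0x0f };

	hotplug_update(&top, 0x07, true);	/* CPU 3 goes offline */
	hotplug_update(&top, 0x0f, true);	/* CPU 3 comes back */

	/* On the default hierarchy the configured mask survived the
	 * offline/online cycle, so CPU 3 is usable again. */
	printf("allowed=0x%lx effective=0x%lx\n",
	       top.cpus_allowed, top.cpus_effective);	/* 0xf 0xf */
	return 0;
}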