@@ -855,36 +855,45 @@ static void update_tasks_cpumask(struct cpuset *cs)
 }
 
 /*
- * update_tasks_cpumask_hier - Update the cpumasks of tasks in the hierarchy.
- * @root_cs: the root cpuset of the hierarchy
- * @update_root: update root cpuset or not?
+ * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
+ * @cs: the cpuset to consider
+ * @new_cpus: temp variable for calculating new effective_cpus
+ *
+ * When the configured cpumask is changed, the effective cpumasks of this
+ * cpuset and all its descendants need to be updated.
  *
- * This will update cpumasks of tasks in @root_cs and all other empty cpusets
- * which take on cpumask of @root_cs.
+ * On legacy hierarchy, effective_cpus will be the same as cpus_allowed.
  *
  * Called with cpuset_mutex held
  */
-static void update_tasks_cpumask_hier(struct cpuset *root_cs, bool update_root)
+static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
 {
 	struct cpuset *cp;
 	struct cgroup_subsys_state *pos_css;
 
 	rcu_read_lock();
-	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
-		if (cp == root_cs) {
-			if (!update_root)
-				continue;
-		} else {
-			/* skip the whole subtree if @cp have some CPU */
-			if (!cpumask_empty(cp->cpus_allowed)) {
-				pos_css = css_rightmost_descendant(pos_css);
-				continue;
-			}
+	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
+		struct cpuset *parent = parent_cs(cp);
+
+		cpumask_and(new_cpus, cp->cpus_allowed, parent->effective_cpus);
+
+		/* Skip the whole subtree if the cpumask remains the same. */
+		if (cpumask_equal(new_cpus, cp->effective_cpus)) {
+			pos_css = css_rightmost_descendant(pos_css);
+			continue;
 		}
+
 		if (!css_tryget_online(&cp->css))
 			continue;
 		rcu_read_unlock();
 
+		mutex_lock(&callback_mutex);
+		cpumask_copy(cp->effective_cpus, new_cpus);
+		mutex_unlock(&callback_mutex);
+
+		WARN_ON(!cgroup_on_dfl(cp->css.cgroup) &&
+			!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
+
 		update_tasks_cpumask(cp);
 
 		rcu_read_lock();
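The walk above boils down to one rule, applied top-down: a cpuset's effective mask is its configured mask intersected with its parent's effective mask, and a whole subtree can be pruned as soon as a node's effective mask comes out unchanged, because its descendants' inputs are then unchanged too. The following is a minimal userspace sketch of that rule on a plain tree, not kernel code: struct node and propagate() are illustrative names, a 64-bit word stands in for a cpumask, and plain recursion replaces the kernel's RCU-protected pre-order iterator and css refcounting.

#include <stdint.h>

/* Illustrative stand-ins: a 64-bit word models a cpumask. */
struct node {
	uint64_t configured;	/* analogue of cpus_allowed */
	uint64_t effective;	/* analogue of effective_cpus */
	struct node *child;	/* first child */
	struct node *sibling;	/* next sibling */
};

/*
 * Top-down propagation: effective = configured & parent_effective.
 * If a node's effective mask is unchanged, nothing below it can change
 * either, so the whole subtree is skipped -- the same pruning the patch
 * does with css_rightmost_descendant().
 */
static void propagate(struct node *n, uint64_t parent_effective)
{
	uint64_t new_eff = n->configured & parent_effective;

	if (new_eff == n->effective)
		return;			/* prune this subtree */

	n->effective = new_eff;
	for (struct node *c = n->child; c; c = c->sibling)
		propagate(c, n->effective);
}

Re-running propagate() from the node whose configured mask changed narrows or widens every descendant's effective mask without ever touching their configured masks, which is exactly what update_cpumasks_hier() does for cpusets.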
@@ -940,7 +949,8 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
 	mutex_unlock(&callback_mutex);
 
-	update_tasks_cpumask_hier(cs, true);
+	/* use trialcs->cpus_allowed as a temp variable */
+	update_cpumasks_hier(cs, trialcs->cpus_allowed);
 
 	if (is_load_balanced)
 		rebuild_sched_domains_locked();
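The temp-variable comment marks a deliberate choice: trialcs is the throwaway trial copy that the caller frees once the change is applied, so its mask can safely be clobbered as scratch space by the walk, and no extra cpumask has to be allocated (an allocation that could itself fail with CONFIG_CPUMASK_OFFSTACK).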
@@ -1091,36 +1101,45 @@ static void update_tasks_nodemask(struct cpuset *cs)
 }
 
 /*
- * update_tasks_nodemask_hier - Update the nodemasks of tasks in the hierarchy.
- * @cs: the root cpuset of the hierarchy
- * @update_root: update the root cpuset or not?
+ * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
+ * @cs: the cpuset to consider
+ * @new_mems: a temp variable for calculating new effective_mems
+ *
+ * When the configured nodemask is changed, the effective nodemasks of this
+ * cpuset and all its descendants need to be updated.
  *
- * This will update nodemasks of tasks in @root_cs and all other empty cpusets
- * which take on nodemask of @root_cs.
+ * On legacy hierarchy, effective_mems will be the same as mems_allowed.
  *
  * Called with cpuset_mutex held
  */
-static void update_tasks_nodemask_hier(struct cpuset *root_cs, bool update_root)
+static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
 {
 	struct cpuset *cp;
 	struct cgroup_subsys_state *pos_css;
 
 	rcu_read_lock();
-	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
-		if (cp == root_cs) {
-			if (!update_root)
-				continue;
-		} else {
-			/* skip the whole subtree if @cp have some CPU */
-			if (!nodes_empty(cp->mems_allowed)) {
-				pos_css = css_rightmost_descendant(pos_css);
-				continue;
-			}
+	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
+		struct cpuset *parent = parent_cs(cp);
+
+		nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
+
+		/* Skip the whole subtree if the nodemask remains the same. */
+		if (nodes_equal(*new_mems, cp->effective_mems)) {
+			pos_css = css_rightmost_descendant(pos_css);
+			continue;
 		}
+
 		if (!css_tryget_online(&cp->css))
 			continue;
 		rcu_read_unlock();
 
+		mutex_lock(&callback_mutex);
+		cp->effective_mems = *new_mems;
+		mutex_unlock(&callback_mutex);
+
+		WARN_ON(!cgroup_on_dfl(cp->css.cgroup) &&
+			!nodes_equal(cp->mems_allowed, cp->effective_mems));
+
 		update_tasks_nodemask(cp);
 
 		rcu_read_lock();
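Note the by-value style in this variant: nodemask_t is a fixed-size struct, so nodes_and() writes through the dereferenced *new_mems and the update of cp->effective_mems is a plain structure assignment, whereas the cpumask walk above goes through struct cpumask pointers with cpumask_and() and cpumask_copy().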
@@ -1188,7 +1207,8 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
 	cs->mems_allowed = trialcs->mems_allowed;
 	mutex_unlock(&callback_mutex);
 
-	update_tasks_nodemask_hier(cs, true);
+	/* use trialcs->mems_allowed as a temp variable */
+	update_nodemasks_hier(cs, &trialcs->mems_allowed);
 done:
 	return retval;
 }