@@ -298,6 +298,16 @@ static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);
 
 static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
 
+/*
+ * Cgroup v2 behavior is used when on default hierarchy or the
+ * cgroup_v2_mode flag is set.
+ */
+static inline bool is_in_v2_mode(void)
+{
+	return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
+	      (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
+}
+
 /*
  * This is ugly, but preserves the userspace API for existing cpuset
  * users. If someone tries to mount the "cpuset" filesystem, we
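
A note on how CGRP_ROOT_CPUSET_V2_MODE gets set: it is a cgroup root flag, so it is chosen at mount time for the v1 hierarchy. A minimal userspace sketch follows, assuming the corresponding v1 mount option is named cpuset_v2_mode; the option name is inferred from the flag and is not part of this diff.

/* Sketch: mount the v1 cpuset hierarchy with v2 behavior enabled.
 * The "cpuset_v2_mode" option name is an assumption based on the
 * CGRP_ROOT_CPUSET_V2_MODE flag; it does not appear in this hunk.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("cgroup", "/sys/fs/cgroup/cpuset", "cgroup", 0,
		  "cpuset,cpuset_v2_mode") < 0) {
		perror("mount cpuset with cpuset_v2_mode");
		return 1;
	}
	return 0;
}

With that flag set, is_in_v2_mode() returns true everywhere below even though cpuset is not bound to the default hierarchy.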
@@ -488,8 +498,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
 
 	/* On legacy hiearchy, we must be a subset of our parent cpuset. */
 	ret = -EACCES;
-	if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
-	    !is_cpuset_subset(trial, par))
+	if (!is_in_v2_mode() && !is_cpuset_subset(trial, par))
 		goto out;
 
 	/*
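
For reference, is_cpuset_subset() is the containment check that v2 mode now bypasses. A rough sketch of its core test, not verbatim from cpuset.c (the mainline helper also compares the cpu/mem exclusivity flags):

/* Rough sketch of the containment test skipped under is_in_v2_mode(). */
static int is_cpuset_subset_sketch(const struct cpuset *p,
				   const struct cpuset *q)
{
	return cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
	       nodes_subset(p->mems_allowed, q->mems_allowed);
}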
@@ -895,8 +904,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
 		 * If it becomes empty, inherit the effective mask of the
 		 * parent, which is guaranteed to have some CPUs.
 		 */
-		if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
-		    cpumask_empty(new_cpus))
+		if (is_in_v2_mode() && cpumask_empty(new_cpus))
 			cpumask_copy(new_cpus, parent->effective_cpus);
 
 		/* Skip the whole subtree if the cpumask remains the same. */
@@ -913,7 +921,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
 		cpumask_copy(cp->effective_cpus, new_cpus);
 		spin_unlock_irq(&callback_lock);
 
-		WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
+		WARN_ON(!is_in_v2_mode() &&
 			!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
 
 		update_tasks_cpumask(cp);
@@ -1149,8 +1157,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
 		 * If it becomes empty, inherit the effective mask of the
 		 * parent, which is guaranteed to have some MEMs.
 		 */
-		if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
-		    nodes_empty(*new_mems))
+		if (is_in_v2_mode() && nodes_empty(*new_mems))
 			*new_mems = parent->effective_mems;
 
 		/* Skip the whole subtree if the nodemask remains the same. */
@@ -1167,7 +1174,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
 		cp->effective_mems = *new_mems;
 		spin_unlock_irq(&callback_lock);
 
-		WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
+		WARN_ON(!is_in_v2_mode() &&
 			!nodes_equal(cp->mems_allowed, cp->effective_mems));
 
 		update_tasks_nodemask(cp);
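
The two WARN_ONs above encode the same invariant for CPUs and for memory nodes: outside v2 mode, a cpuset's effective masks must mirror its configured masks, whereas v2 behavior lets them diverge (for example after CPU or memory hotplug). A hypothetical helper, not part of the patch, restating the combined condition:

/* Hypothetical helper restating the invariant the WARN_ON()s check:
 * legacy v1 semantics keep the effective masks identical to the
 * user-configured masks.
 */
static inline bool cpuset_masks_consistent(struct cpuset *cs)
{
	return is_in_v2_mode() ||
	       (cpumask_equal(cs->cpus_allowed, cs->effective_cpus) &&
		nodes_equal(cs->mems_allowed, cs->effective_mems));
}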
@@ -1459,7 +1466,7 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
 
 	/* allow moving tasks into an empty cpuset if on default hierarchy */
 	ret = -ENOSPC;
-	if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
+	if (!is_in_v2_mode() &&
 	    (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
 		goto out_unlock;
 
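
The practical effect of this hunk: in v2 mode a task may be attached to a cpuset whose cpus or mems are empty, running on the parent's effective masks instead of the attach failing with -ENOSPC. A small userspace illustration, assuming a v1 hierarchy mounted with the v2-mode option; the "empty_cs" path is hypothetical:

/* Sketch: attach the current process to a cpuset whose cpuset.cpus is
 * empty. This succeeds in v2 mode; legacy v1 rejects it with ENOSPC.
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	FILE *f = fopen("/sys/fs/cgroup/cpuset/empty_cs/tasks", "w");

	if (!f) {
		perror("open tasks file");
		return 1;
	}
	if (fprintf(f, "%d\n", getpid()) < 0)
		perror("attach to empty cpuset");
	fclose(f);
	return 0;
}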
@@ -1977,7 +1984,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
 	cpuset_inc();
 
 	spin_lock_irq(&callback_lock);
-	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
+	if (is_in_v2_mode()) {
 		cpumask_copy(cs->effective_cpus, parent->effective_cpus);
 		cs->effective_mems = parent->effective_mems;
 	}
@@ -2054,7 +2061,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
 	mutex_lock(&cpuset_mutex);
 	spin_lock_irq(&callback_lock);
 
-	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
+	if (is_in_v2_mode()) {
 		cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
 		top_cpuset.mems_allowed = node_possible_map;
 	} else {
@@ -2248,7 +2255,7 @@ retry:
 	cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
 	mems_updated = !nodes_equal(new_mems, cs->effective_mems);
 
-	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
+	if (is_in_v2_mode())
 		hotplug_update_tasks(cs, &new_cpus, &new_mems,
 				     cpus_updated, mems_updated);
 	else
@@ -2279,7 +2286,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
 	static cpumask_t new_cpus;
 	static nodemask_t new_mems;
 	bool cpus_updated, mems_updated;
-	bool on_dfl = cgroup_subsys_on_dfl(cpuset_cgrp_subsys);
+	bool on_dfl = is_in_v2_mode();
 
 	mutex_lock(&cpuset_mutex);
 
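
Taken together, the last two hunks make hotplug handling follow the v2 path whenever is_in_v2_mode() holds, so a v1 mount with the v2-mode flag also keeps tasks in place across hotplug rather than migrating them out of emptied cpusets. Note that the local variable keeps its old on_dfl name even though it no longer strictly means "on the default hierarchy". A condensed sketch of the per-cpuset dispatch, assuming the legacy branch (cut off by the hunk context above) calls hotplug_update_tasks_legacy() as in mainline:

/* Condensed sketch of the per-cpuset hotplug dispatch after this patch.
 * The legacy callee is assumed from mainline; it is not visible in the
 * hunk above.
 */
static void hotplug_dispatch_sketch(struct cpuset *cs,
				    struct cpumask *new_cpus,
				    nodemask_t *new_mems,
				    bool cpus_updated, bool mems_updated)
{
	if (is_in_v2_mode())
		/* v2: shrink effective masks, leave tasks where they are */
		hotplug_update_tasks(cs, new_cpus, new_mems,
				     cpus_updated, mems_updated);
	else
		/* v1: emptied cpusets get their tasks moved to an ancestor */
		hotplug_update_tasks_legacy(cs, new_cpus, new_mems,
					    cpus_updated, mems_updated);
}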