@@ -2082,6 +2082,7 @@ retry:
 
 	mutex_lock(&callback_mutex);
 	cpumask_andnot(cs->cpus_allowed, cs->cpus_allowed, &off_cpus);
+	cpumask_andnot(cs->effective_cpus, cs->effective_cpus, &off_cpus);
 	mutex_unlock(&callback_mutex);
 
 	/*
@@ -2096,6 +2097,7 @@ retry:
 
 	mutex_lock(&callback_mutex);
 	nodes_andnot(cs->mems_allowed, cs->mems_allowed, off_mems);
+	nodes_andnot(cs->effective_mems, cs->effective_mems, off_mems);
 	mutex_unlock(&callback_mutex);
 
 	/*
@@ -2159,6 +2161,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
 	if (cpus_updated) {
 		mutex_lock(&callback_mutex);
 		cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
+		cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
 		mutex_unlock(&callback_mutex);
 		/* we don't mess with cpumasks of tasks in top_cpuset */
 	}
@@ -2167,6 +2170,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
 	if (mems_updated) {
 		mutex_lock(&callback_mutex);
 		top_cpuset.mems_allowed = new_mems;
+		top_cpuset.effective_mems = new_mems;
 		mutex_unlock(&callback_mutex);
 		update_tasks_nodemask(&top_cpuset);
 	}
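
Taken together, the hunks above keep each cpuset's effective masks in lockstep with its configured masks under callback_mutex whenever CPUs or memory nodes go offline, and keep top_cpuset's effective masks tracking the online sets. A minimal user-space sketch of that invariant follows, with a plain 64-bit word standing in for cpumask_var_t; struct cs and hotplug_offline are illustrative names for this sketch, not kernel API:

#include <assert.h>
#include <stdint.h>

/* Illustrative stand-in for struct cpuset: one word per mask. */
struct cs {
	uint64_t cpus_allowed;   /* configured by the user */
	uint64_t effective_cpus; /* what tasks may actually run on */
};

/*
 * Mirror of the hotplug path in the patch: strip offlined CPUs
 * from both the configured and the effective mask, in one step,
 * so the two never disagree about a dead CPU.
 */
static void hotplug_offline(struct cs *cs, uint64_t off_cpus)
{
	cs->cpus_allowed   &= ~off_cpus; /* the preexisting cpumask_andnot() */
	cs->effective_cpus &= ~off_cpus; /* the line the patch adds */
}

int main(void)
{
	struct cs cs = { .cpus_allowed = 0xf, .effective_cpus = 0xf };

	hotplug_offline(&cs, 0x4); /* CPU 2 goes away */
	/* Without the added line, effective_cpus would still claim CPU 2. */
	assert(cs.effective_cpus == cs.cpus_allowed);
	return 0;
}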