|
@@ -974,12 +974,6 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
  * Temporarilly set tasks mems_allowed to target nodes of migration,
  * so that the migration code can allocate pages on these nodes.
  *
- * Call holding cpuset_mutex, so current's cpuset won't change
- * during this call, as manage_mutex holds off any cpuset_attach()
- * calls. Therefore we don't need to take task_lock around the
- * call to guarantee_online_mems(), as we know no one is changing
- * our task's cpuset.
- *
  * While the mm_struct we are migrating is typically from some
  * other task, the task_struct mems_allowed that we are hacking
  * is for our current task, which must allocate new pages for that
@@ -996,8 +990,10 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 
 	do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
 
+	rcu_read_lock();
 	mems_cs = effective_nodemask_cpuset(task_cs(tsk));
 	guarantee_online_mems(mems_cs, &tsk->mems_allowed);
+	rcu_read_unlock();
 }
 
 /*
@@ -2486,9 +2482,9 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
 
 	task_lock(current);
 	cs = nearest_hardwall_ancestor(task_cs(current));
+	allowed = node_isset(node, cs->mems_allowed);
 	task_unlock(current);
 
-	allowed = node_isset(node, cs->mems_allowed);
 	mutex_unlock(&callback_mutex);
 	return allowed;
 }