@@ -47,18 +47,20 @@ static DEFINE_SPINLOCK(zone_scan_lock);
 #ifdef CONFIG_NUMA
 /**
  * has_intersects_mems_allowed() - check task eligibility for kill
- * @tsk: task struct of which task to consider
+ * @start: task struct of which task to consider
  * @mask: nodemask passed to page allocator for mempolicy ooms
  *
  * Task eligibility is determined by whether or not a candidate task, @tsk,
  * shares the same mempolicy nodes as current if it is bound by such a policy
  * and whether or not it has the same set of allowed cpuset nodes.
  */
-static bool has_intersects_mems_allowed(struct task_struct *tsk,
+static bool has_intersects_mems_allowed(struct task_struct *start,
                                         const nodemask_t *mask)
 {
-        struct task_struct *start = tsk;
+        struct task_struct *tsk;
+        bool ret = false;
 
+        rcu_read_lock();
         for_each_thread(start, tsk) {
                 if (mask) {
                         /*
@@ -67,19 +69,20 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk,
                          * mempolicy intersects current, otherwise it may be
                          * needlessly killed.
                          */
-                        if (mempolicy_nodemask_intersects(tsk, mask))
-                                return true;
+                        ret = mempolicy_nodemask_intersects(tsk, mask);
                 } else {
                         /*
                          * This is not a mempolicy constrained oom, so only
                          * check the mems of tsk's cpuset.
                          */
-                        if (cpuset_mems_allowed_intersects(current, tsk))
-                                return true;
+                        ret = cpuset_mems_allowed_intersects(current, tsk);
                 }
+                if (ret)
+                        break;
         }
+        rcu_read_unlock();
 
-        return false;
+        return ret;
 }
 #else
 static bool has_intersects_mems_allowed(struct task_struct *tsk,