@@ -111,14 +111,11 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
 {
 	if (tg != &root_task_group)
 		return false;
-
 	/*
-	 * We can only assume the task group can't go away on us if
-	 * autogroup_move_group() can see us on ->thread_group list.
+	 * If we race with autogroup_move_group() the caller can use the old
+	 * value of signal->autogroup but in this case sched_move_task() will
+	 * be called again before autogroup_kref_put().
 	 */
-	if (p->flags & PF_EXITING)
-		return false;
-
 	return true;
 }
 
@@ -138,13 +135,17 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
 	}
 
 	p->signal->autogroup = autogroup_kref_get(ag);
-
-	if (!READ_ONCE(sysctl_sched_autogroup_enabled))
-		goto out;
-
+	/*
+	 * We can't avoid sched_move_task() after we changed signal->autogroup,
+	 * this process can already run with task_group() == prev->tg or we can
+	 * race with cgroup code which can read autogroup = prev under rq->lock.
+	 * In the latter case for_each_thread() can not miss a migrating thread,
+	 * cpu_cgroup_attach() must not be possible after cgroup_exit() and it
+	 * can't be removed from thread list, we hold ->siglock.
+	 */
 	for_each_thread(p, t)
 		sched_move_task(t);
-out:
+
 	unlock_task_sighand(p, &flags);
 	autogroup_kref_put(prev);
 }