@@ -1845,11 +1845,13 @@ static __latent_entropy struct task_struct *copy_process(
 	 */
 	recalc_sigpending();
 	if (signal_pending(current)) {
-		spin_unlock(&current->sighand->siglock);
-		write_unlock_irq(&tasklist_lock);
 		retval = -ERESTARTNOINTR;
 		goto bad_fork_cancel_cgroup;
 	}
+	if (unlikely(!(ns_of_pid(pid)->nr_hashed & PIDNS_HASH_ADDING))) {
+		retval = -ENOMEM;
+		goto bad_fork_cancel_cgroup;
+	}
 
 	if (likely(p->pid)) {
 		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
@@ -1907,6 +1909,8 @@ static __latent_entropy struct task_struct *copy_process(
 	return p;
 
 bad_fork_cancel_cgroup:
+	spin_unlock(&current->sighand->siglock);
+	write_unlock_irq(&tasklist_lock);
 	cgroup_cancel_fork(p);
 bad_fork_free_pid:
 	cgroup_threadgroup_change_end(current);
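
The hunks above move the spin_unlock()/write_unlock_irq() calls out of the signal_pending() bail-out and into the shared bad_fork_cancel_cgroup label, so the newly added PIDNS_HASH_ADDING check can fail through the same path without repeating the unlocks. The userspace C sketch below only illustrates that unwinding pattern under simplified assumptions; fake_copy_process(), g_siglock and g_tasklist_lock are hypothetical stand-ins, not the kernel's structures, and EINTR/ENOMEM merely mimic the error codes in the patch.

/* Sketch of the "single error label drops the locks" pattern (not kernel code). */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t g_siglock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t g_tasklist_lock = PTHREAD_MUTEX_INITIALIZER;

static int fake_copy_process(bool signal_pending, bool ns_still_adding)
{
	int retval;

	pthread_mutex_lock(&g_tasklist_lock);
	pthread_mutex_lock(&g_siglock);

	if (signal_pending) {
		/* Before the patch: both locks were dropped right here. */
		retval = -EINTR;
		goto cancel;
	}

	if (!ns_still_adding) {
		/* New failure case reuses the same unwinding label. */
		retval = -ENOMEM;
		goto cancel;
	}

	/* Success path releases the locks itself and returns. */
	pthread_mutex_unlock(&g_siglock);
	pthread_mutex_unlock(&g_tasklist_lock);
	return 0;

cancel:
	/* One place that undoes the locking for every failure above. */
	pthread_mutex_unlock(&g_siglock);
	pthread_mutex_unlock(&g_tasklist_lock);
	return retval;
}

int main(void)
{
	printf("ok: %d\n", fake_copy_process(false, true));
	printf("pending signal: %d\n", fake_copy_process(true, true));
	printf("ns no longer adding: %d\n", fake_copy_process(false, false));
	return 0;
}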