@@ -269,11 +269,40 @@ static void account_kernel_stack(struct task_struct *tsk, int account)
 	}
 }
 
-void free_task(struct task_struct *tsk)
+static void release_task_stack(struct task_struct *tsk)
 {
 	account_kernel_stack(tsk, -1);
 	arch_release_thread_stack(tsk->stack);
 	free_thread_stack(tsk);
+	tsk->stack = NULL;
+#ifdef CONFIG_VMAP_STACK
+	tsk->stack_vm_area = NULL;
+#endif
+}
+
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+void put_task_stack(struct task_struct *tsk)
+{
+	if (atomic_dec_and_test(&tsk->stack_refcount))
+		release_task_stack(tsk);
+}
+#endif
+
+void free_task(struct task_struct *tsk)
+{
+#ifndef CONFIG_THREAD_INFO_IN_TASK
+	/*
+	 * The task is finally done with both the stack and thread_info,
+	 * so free both.
+	 */
+	release_task_stack(tsk);
+#else
+	/*
+	 * If the task had a separate stack allocation, it should be gone
+	 * by now.
+	 */
+	WARN_ON_ONCE(atomic_read(&tsk->stack_refcount) != 0);
+#endif
 	rt_mutex_debug_task_free(tsk);
 	ftrace_graph_exit_task(tsk);
 	put_seccomp_filter(tsk);
@@ -411,6 +440,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 #ifdef CONFIG_VMAP_STACK
 	tsk->stack_vm_area = stack_vm_area;
 #endif
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	atomic_set(&tsk->stack_refcount, 1);
+#endif
 
 	if (err)
 		goto free_stack;
@@ -1771,6 +1803,7 @@ bad_fork_cleanup_count:
 	atomic_dec(&p->cred->user->processes);
 	exit_creds(p);
 bad_fork_free:
+	put_task_stack(p);
 	free_task(p);
 fork_out:
 	return ERR_PTR(retval);
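
For illustration only, here is a minimal user-space sketch of the "free on the
last put" refcounting pattern that put_task_stack() implements above with
atomic_dec_and_test(). The fake_task structure and the put_stack() and
release_stack() helpers are hypothetical stand-ins, written with C11 atomics
rather than the kernel's atomic_t so the snippet compiles and runs on its own;
it is not kernel code.

/* Build with: cc -std=c11 refdemo.c -o refdemo */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_task {
	void *stack;			/* stands in for tsk->stack */
	atomic_int stack_refcount;	/* stands in for tsk->stack_refcount */
};

static void release_stack(struct fake_task *t)
{
	free(t->stack);
	t->stack = NULL;		/* mirrors "tsk->stack = NULL" in the patch */
}

/* Drop one reference; release the stack only when the last reference goes away. */
static void put_stack(struct fake_task *t)
{
	/*
	 * atomic_fetch_sub() returns the old value, so old == 1 means the count
	 * just hit zero, the same condition atomic_dec_and_test() reports.
	 */
	if (atomic_fetch_sub(&t->stack_refcount, 1) == 1)
		release_stack(t);
}

int main(void)
{
	struct fake_task t = { .stack = malloc(4096) };

	atomic_init(&t.stack_refcount, 1);	/* like dup_task_struct() taking the initial reference */
	put_stack(&t);				/* last put: the stack is released */
	printf("stack is %s\n", t.stack ? "still live" : "freed");
	return 0;
}

In the patch itself, release_task_stack() is split out of free_task() for the
same reason: with CONFIG_THREAD_INFO_IN_TASK, free_task() only warns if
references remain, and the stack is actually freed by whoever drops the last
stack_refcount reference through put_task_stack().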