@@ -340,13 +340,14 @@ void set_task_stack_end_magic(struct task_struct *tsk)
 	*stackend = STACK_END_MAGIC;	/* for overflow detection */
 }
 
-static struct task_struct *dup_task_struct(struct task_struct *orig)
+static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 {
 	struct task_struct *tsk;
 	struct thread_info *ti;
-	int node = tsk_fork_get_node(orig);
 	int err;
 
+	if (node == NUMA_NO_NODE)
+		node = tsk_fork_get_node(orig);
 	tsk = alloc_task_struct_node(node);
 	if (!tsk)
 		return NULL;
@@ -1276,7 +1277,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 					int __user *child_tidptr,
 					struct pid *pid,
 					int trace,
-					unsigned long tls)
+					unsigned long tls,
+					int node)
 {
 	int retval;
 	struct task_struct *p;
@@ -1328,7 +1330,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		goto fork_out;
 
 	retval = -ENOMEM;
-	p = dup_task_struct(current);
+	p = dup_task_struct(current, node);
 	if (!p)
 		goto fork_out;
 
@@ -1706,7 +1708,8 @@ static inline void init_idle_pids(struct pid_link *links)
 struct task_struct *fork_idle(int cpu)
 {
 	struct task_struct *task;
-	task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0);
+	task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0,
+			    cpu_to_node(cpu));
 	if (!IS_ERR(task)) {
 		init_idle_pids(task->pids);
 		init_idle(task, cpu);
@@ -1751,7 +1754,7 @@ long _do_fork(unsigned long clone_flags,
 	}
 
 	p = copy_process(clone_flags, stack_start, stack_size,
-			 child_tidptr, NULL, trace, tls);
+			 child_tidptr, NULL, trace, tls, NUMA_NO_NODE);
 	/*
 	 * Do this prior waking up the new thread - the thread pointer
 	 * might get invalid after that point, if the thread exits quickly.
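
The patch threads a node argument from copy_process() down to
dup_task_struct(), with NUMA_NO_NODE as a sentinel meaning "fall back to
tsk_fork_get_node()". fork_idle() pins the idle task's allocation to
cpu_to_node(cpu), while _do_fork() keeps the old behaviour by passing
NUMA_NO_NODE. A minimal userspace sketch of that sentinel dispatch
follows; the helper names are hypothetical stand-ins for the kernel's
alloc_task_struct_node()/tsk_fork_get_node(), not kernel code:

#include <stdio.h>

#define NUMA_NO_NODE	(-1)	/* same sentinel value the kernel uses */

/* Hypothetical stand-in for tsk_fork_get_node(): the policy default. */
static int default_node(void)
{
	return 0;
}

/*
 * Hypothetical stand-in for alloc_task_struct_node(): honour an
 * explicitly requested node, otherwise fall back to the default policy.
 */
static int alloc_on_node(int node)
{
	if (node == NUMA_NO_NODE)
		node = default_node();
	printf("allocating on node %d\n", node);
	return node;
}

int main(void)
{
	alloc_on_node(NUMA_NO_NODE);	/* _do_fork()-style caller: old behaviour */
	alloc_on_node(2);		/* fork_idle()-style caller: pinned node */
	return 0;
}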