@@ -4744,7 +4744,7 @@ static struct rq *move_queued_task(struct task_struct *p, int new_cpu)
 
 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 {
-	if (p->sched_class && p->sched_class->set_cpus_allowed)
+	if (p->sched_class->set_cpus_allowed)
 		p->sched_class->set_cpus_allowed(p, new_mask);
 
 	cpumask_copy(&p->cpus_allowed, new_mask);
@@ -7253,6 +7253,11 @@ void __init sched_init(void)
 	atomic_inc(&init_mm.mm_count);
 	enter_lazy_tlb(&init_mm, current);
 
+	/*
+	 * During early bootup we pretend to be a normal task:
+	 */
+	current->sched_class = &fair_sched_class;
+
 	/*
 	 * Make us the idle thread. Technically, schedule() should not be
 	 * called from this thread, however somewhere below it might be,
@@ -7263,11 +7268,6 @@ void __init sched_init(void)
 
 	calc_load_update = jiffies + LOAD_FREQ;
 
-	/*
-	 * During early bootup we pretend to be a normal task:
-	 */
-	current->sched_class = &fair_sched_class;
-
 #ifdef CONFIG_SMP
 	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
 	/* May be allocated at isolcpus cmdline parse time */
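
The two halves of this patch read as coupled: dropping the NULL test in do_set_cpus_allowed() is only safe if every task's sched_class pointer is set before that function can run, and moving the fair_sched_class assignment to the top of sched_init() establishes exactly that ordering during early boot. Below is a minimal, self-contained C sketch of the invariant; the struct layouts, function bodies, and main() scaffolding are invented for illustration, and only the identifier names mirror the patch.

/*
 * Illustrative sketch only -- not kernel code. It models the ordering
 * invariant the patch relies on: assign ->sched_class before anything
 * can dereference it, so the per-call NULL test becomes unnecessary.
 */
#include <stdio.h>

struct sched_class {
	void (*set_cpus_allowed)(void);
};

/* Zero-initialized at file scope, so the hook is NULL here. */
static const struct sched_class fair_sched_class;

struct task {
	const struct sched_class *sched_class;
};

static struct task init_task;			/* ->sched_class starts out NULL */
static struct task *current_task = &init_task;

/* Post-patch shape: no "p->sched_class &&" guard. */
static void do_set_cpus_allowed(struct task *p)
{
	if (p->sched_class->set_cpus_allowed)
		p->sched_class->set_cpus_allowed();
}

static void sched_init(void)
{
	/* Mirrors the moved hunk: set the class first... */
	current_task->sched_class = &fair_sched_class;

	/* ...so any later call during init is safe without a NULL test. */
	do_set_cpus_allowed(current_task);
}

int main(void)
{
	sched_init();
	puts("ok: sched_class assigned before first dereference");
	return 0;
}

The design point the sketch illustrates is that an initialization-order guarantee replaces a runtime check on every call, which matters for a helper like do_set_cpus_allowed() that sits on a hot path.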