@@ -1153,6 +1153,8 @@ static int migration_cpu_stop(void *data)
 
 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 {
+	lockdep_assert_held(&p->pi_lock);
+
 	if (p->sched_class->set_cpus_allowed)
 		p->sched_class->set_cpus_allowed(p, new_mask);
 
@@ -1169,7 +1171,8 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
  * task must not exit() & deallocate itself prematurely. The
  * call is not atomic; no spinlocks may be held.
  */
-int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
+static int __set_cpus_allowed_ptr(struct task_struct *p,
+				  const struct cpumask *new_mask, bool check)
 {
 	unsigned long flags;
 	struct rq *rq;
@@ -1178,6 +1181,15 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 
 	rq = task_rq_lock(p, &flags);
 
+	/*
+	 * Must re-check here, to close a race against __kthread_bind(),
+	 * sched_setaffinity() is not guaranteed to observe the flag.
+	 */
+	if (check && (p->flags & PF_NO_SETAFFINITY)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	if (cpumask_equal(&p->cpus_allowed, new_mask))
 		goto out;
 
@@ -1214,6 +1226,11 @@ out:
 
 	return ret;
 }
+
+int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
+{
+	return __set_cpus_allowed_ptr(p, new_mask, false);
+}
 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
@@ -1595,6 +1612,15 @@ static void update_avg(u64 *avg, u64 sample)
 	s64 diff = sample - *avg;
 	*avg += diff >> 3;
 }
+
+#else
+
+static inline int __set_cpus_allowed_ptr(struct task_struct *p,
+					 const struct cpumask *new_mask, bool check)
+{
+	return set_cpus_allowed_ptr(p, new_mask);
+}
+
 #endif /* CONFIG_SMP */
 
 static void
@@ -4340,7 +4366,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 	}
 #endif
 again:
-	retval = set_cpus_allowed_ptr(p, new_mask);
+	retval = __set_cpus_allowed_ptr(p, new_mask, true);
 
 	if (!retval) {
 		cpuset_cpus_allowed(p, cpus_allowed);
@@ -4865,7 +4891,8 @@ void init_idle(struct task_struct *idle, int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&rq->lock, flags);
+	raw_spin_lock_irqsave(&idle->pi_lock, flags);
+	raw_spin_lock(&rq->lock);
 
 	__sched_fork(0, idle);
 	idle->state = TASK_RUNNING;
@@ -4891,7 +4918,8 @@ void init_idle(struct task_struct *idle, int cpu)
 #if defined(CONFIG_SMP)
 	idle->on_cpu = 1;
 #endif
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
+	raw_spin_unlock(&rq->lock);
+	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
 
 	/* Set the preempt count _outside_ the spinlocks! */
 	init_idle_preempt_count(idle, cpu);
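
For context on the caller side (not shown in the hunks above): the other half of this race closure is that a binder such as __kthread_bind() narrows the affinity mask and sets PF_NO_SETAFFINITY while holding p->pi_lock, which is exactly what the new lockdep_assert_held() in do_set_cpus_allowed() documents. A minimal sketch of that pattern, assuming the task has already been made inactive (e.g. via wait_task_inactive()); the helper name is illustrative, not the actual kthread.c change:

	/* Illustrative sketch only, not taken from this patch. */
	static void bind_task_to_cpu(struct task_struct *p, unsigned int cpu)
	{
		unsigned long flags;

		/* The task is assumed inactive here, so pi_lock suffices. */
		raw_spin_lock_irqsave(&p->pi_lock, flags);
		do_set_cpus_allowed(p, cpumask_of(cpu));
		p->flags |= PF_NO_SETAFFINITY;	/* published under pi_lock */
		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
	}

sched_setaffinity() now funnels through __set_cpus_allowed_ptr(p, new_mask, true), which re-checks PF_NO_SETAFFINITY under task_rq_lock() (pi_lock plus rq->lock) and fails with -EINVAL if the task was bound in the meantime; in-kernel callers of set_cpus_allowed_ptr() keep the old behaviour because check is false.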