@@ -1798,7 +1798,7 @@ static int find_later_rq(struct task_struct *task)
 	struct sched_domain *sd;
 	struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
 	int this_cpu = smp_processor_id();
-	int best_cpu, cpu = task_cpu(task);
+	int cpu = task_cpu(task);
 
 	/* Make sure the mask is initialized first */
 	if (unlikely(!later_mask))
@@ -1811,17 +1811,14 @@ static int find_later_rq(struct task_struct *task)
 	 * We have to consider system topology and task affinity
 	 * first, then we can look for a suitable cpu.
 	 */
-	best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
-			task, later_mask);
-	if (best_cpu == -1)
+	if (cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask) == -1)
 		return -1;
 
 	/*
-	 * If we are here, some target has been found,
-	 * the most suitable of which is cached in best_cpu.
-	 * This is, among the runqueues where the current tasks
-	 * have later deadlines than the task's one, the rq
-	 * with the latest possible one.
+	 * If we are here, some targets have been found, including
+	 * the most suitable which is, among the runqueues where the
+	 * current tasks have later deadlines than the task's one, the
+	 * rq with the latest possible one.
 	 *
 	 * Now we check how well this matches with task's
 	 * affinity and system topology.
@@ -1841,6 +1838,7 @@ static int find_later_rq(struct task_struct *task)
 	rcu_read_lock();
 	for_each_domain(cpu, sd) {
 		if (sd->flags & SD_WAKE_AFFINE) {
+			int best_cpu;
 
 			/*
 			 * If possible, preempting this_cpu is
@@ -1852,12 +1850,15 @@ static int find_later_rq(struct task_struct *task)
 				return this_cpu;
 			}
 
+			best_cpu = cpumask_first_and(later_mask,
+					sched_domain_span(sd));
 			/*
-			 * Last chance: if best_cpu is valid and is
-			 * in the mask, that becomes our choice.
+			 * Last chance: if a cpu being in both later_mask
+			 * and current sd span is valid, that becomes our
+			 * choice. Of course, the latest possible cpu is
+			 * already under consideration through later_mask.
 			 */
-			if (best_cpu < nr_cpu_ids &&
-			    cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
+			if (best_cpu < nr_cpu_ids) {
 				rcu_read_unlock();
 				return best_cpu;
 			}
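
For readers less familiar with the cpumask API: cpumask_first_and(a, b) returns the first CPU set in both masks, or a value >= nr_cpu_ids when the intersection is empty, which is why the patch can drop the separate cpumask_test_cpu() check and test only best_cpu < nr_cpu_ids. Below is a minimal userspace sketch (not kernel code) of that idiom; the mask values and the first_and() helper are hypothetical, chosen only to illustrate the validity test.

#include <stdio.h>

#define NR_CPU_IDS 8

/* Userspace analogue of cpumask_first_and() for a single-word mask. */
static int first_and(unsigned long a, unsigned long b)
{
	unsigned long both = a & b;
	int cpu;

	for (cpu = 0; cpu < NR_CPU_IDS; cpu++)
		if (both & (1UL << cpu))
			return cpu;

	return NR_CPU_IDS;	/* empty intersection: "invalid" cpu */
}

int main(void)
{
	unsigned long later_mask = 0xb0;	/* CPUs 4, 5, 7 have later deadlines */
	unsigned long sd_span    = 0x3c;	/* CPUs 2-5 belong to this domain */
	int best_cpu = first_and(later_mask, sd_span);

	if (best_cpu < NR_CPU_IDS)		/* same validity test as in the patch */
		printf("pick cpu %d\n", best_cpu);	/* prints "pick cpu 4" */
	else
		printf("no cpu in both masks\n");

	return 0;
}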