@@ -8436,6 +8436,12 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
 	 */
 	this_rq->idle_stamp = rq_clock(this_rq);
 
+	/*
+	 * Do not pull tasks towards !active CPUs...
+	 */
+	if (!cpu_active(this_cpu))
+		return 0;
+
 	/*
 	 * This is OK, because current is on_cpu, which avoids it being picked
 	 * for load-balance and preemption/IRQs are still disabled avoiding
@@ -8543,6 +8549,13 @@ static int active_load_balance_cpu_stop(void *data)
 	struct rq_flags rf;
 
 	rq_lock_irq(busiest_rq, &rf);
+	/*
+	 * Between queueing the stop-work and running it is a hole in which
+	 * CPUs can become inactive. We should not move tasks from or to
+	 * inactive CPUs.
+	 */
+	if (!cpu_active(busiest_cpu) || !cpu_active(target_cpu))
+		goto out_unlock;
 
 	/* make sure the requested cpu hasn't gone down in the meantime */
 	if (unlikely(busiest_cpu != smp_processor_id() ||
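
The second hunk guards against a time-of-check-to-time-of-use hole: the stop-work is queued while both CPUs look active, but by the time the stopper runs, either may have gone inactive, so the state is rechecked under the runqueue lock before any migration. Below is a minimal userspace sketch of that same recheck-under-lock pattern, not kernel code; all names (cpu_state, stop_work, the active flag) are illustrative stand-ins for the rq lock, cpu_active() and the stopper callback:

/*
 * Toy analogue: work is queued while "active" is true, but the flag can
 * flip in the hole before the work runs, so the worker re-checks it
 * under the lock and bails out instead of migrating.
 *
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct cpu_state {
	pthread_mutex_t lock;   /* analogue of the rq lock */
	bool active;            /* analogue of cpu_active() */
	int nr_tasks;           /* tasks we would migrate away */
};

static struct cpu_state busiest = {
	PTHREAD_MUTEX_INITIALIZER, true, 4
};

/* Analogue of active_load_balance_cpu_stop(): runs some time after queueing. */
static void *stop_work(void *arg)
{
	(void)arg;
	usleep(1000);            /* the "hole": the work runs later */

	pthread_mutex_lock(&busiest.lock);
	/*
	 * Re-check under the lock: the CPU may have gone inactive between
	 * queueing this work and running it.
	 */
	if (!busiest.active) {
		puts("cpu went inactive; skipping migration");
		goto out_unlock;
	}
	busiest.nr_tasks--;      /* pretend to push one task away */
	puts("migrated one task");
out_unlock:
	pthread_mutex_unlock(&busiest.lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	/* Queue the work while the CPU still looks active... */
	pthread_create(&t, NULL, stop_work, NULL);

	/* ...then take the CPU down before the work gets to run. */
	pthread_mutex_lock(&busiest.lock);
	busiest.active = false;
	pthread_mutex_unlock(&busiest.lock);

	pthread_join(t, NULL);
	return 0;
}

In this toy, main usually wins the race and the worker skips the migration, but either ordering is safe, which is exactly the property the cpu_active() recheck buys the real patch: the initial check at queue time is advisory, and only the check made under the lock is authoritative.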