@@ -97,19 +97,15 @@ static bool migrate_one_irq(struct irq_desc *desc)
 	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
 		return false;
 
-	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids)
+	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+		affinity = cpu_online_mask;
 		ret = true;
+	}
 
-	/*
-	 * when using forced irq_set_affinity we must ensure that the cpu
-	 * being offlined is not present in the affinity mask, it may be
-	 * selected as the target CPU otherwise
-	 */
-	affinity = cpu_online_mask;
 	c = irq_data_get_irq_chip(d);
 	if (!c->irq_set_affinity)
 		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-	else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
 		cpumask_copy(d->affinity, affinity);
 
 	return ret;
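
For reference, a minimal sketch of how this part of migrate_one_irq() reads once the hunk is applied: the fallback to cpu_online_mask now happens only when the IRQ's affinity mask no longer contains any online CPU, and irq_set_affinity() is called without the force flag. The declarations of d, affinity, c and ret are not part of this hunk and are reproduced here only as an assumption about the surrounding, unpatched function body; the comments are editorial.

static bool migrate_one_irq(struct irq_desc *desc)
{
	/* Assumed from the unpatched context above this hunk. */
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = d->affinity;
	struct irq_chip *c;
	bool ret = false;

	/* Nothing to do for per-CPU IRQs or IRQs not targeting this CPU. */
	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
		return false;

	/*
	 * Fall back to the online mask only when no online CPU remains in
	 * the affinity mask, and remember that we changed it so that
	 * d->affinity can be updated below.
	 */
	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		affinity = cpu_online_mask;
		ret = true;
	}

	c = irq_data_get_irq_chip(d);
	if (!c->irq_set_affinity)
		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
		cpumask_copy(d->affinity, affinity);

	return ret;
}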