cpuhotplug.c

/*
 * Generic cpu hotunplug interrupt migration code copied from the
 * arch/arm implementation
 *
 * Copyright (C) Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>

#include "internals.h"
/* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at general affinity mask */
static inline bool irq_needs_fixup(struct irq_data *d)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(d);

	return cpumask_test_cpu(smp_processor_id(), m);
}
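
/*
 * migrate_one_irq() returns true if the interrupt's affinity had to be
 * broken, i.e. it was forced onto cpu_online_mask because none of its
 * configured target CPUs remain online. The caller uses this to emit a
 * ratelimited warning.
 */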
static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d);
	const struct cpumask *affinity;
	bool brokeaff = false;
	int err;

	/*
	 * IRQ chip might be already torn down, but the irq descriptor is
	 * still in the radix tree. Also if the chip has no affinity setter,
	 * nothing can be done here.
	 */
	if (!chip || !chip->irq_set_affinity) {
		pr_debug("IRQ %u: Unable to migrate away\n", d->irq);
		return false;
	}

	/*
	 * No move required, if:
	 *  - Interrupt is per cpu
	 *  - Interrupt is not started
	 *  - Affinity mask does not include this CPU.
	 *
	 * Note: Do not check desc->action as this might be a chained
	 * interrupt.
	 */
	if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {
		/*
		 * If an irq move is pending, abort it if the dying CPU is
		 * the sole target.
		 */
		irq_fixup_move_pending(desc, false);
		return false;
	}

	/*
	 * Complete a possibly pending irq move cleanup. If this
	 * interrupt was moved in hard irq context, then the vectors need
	 * to be cleaned up. It can't wait until this interrupt actually
	 * happens and this CPU was involved.
	 */
	irq_force_complete_move(desc);

	/*
	 * If there is a setaffinity pending, then try to reuse the pending
	 * mask, so the last change of the affinity does not get lost. If
	 * there is no move pending or the pending mask does not contain
	 * any online CPU, use the current affinity mask.
	 */
	if (irq_fixup_move_pending(desc, true))
		affinity = irq_desc_get_pending_mask(desc);
	else
		affinity = irq_data_get_affinity_mask(d);

	/* Mask the chip for interrupts which cannot move in process context */
	if (maskchip && chip->irq_mask)
		chip->irq_mask(d);

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If the interrupt is managed, then shut it down and leave
		 * the affinity untouched.
		 */
		if (irqd_affinity_is_managed(d)) {
			irqd_set_managed_shutdown(d);
			irq_shutdown(desc);
			return false;
		}
		affinity = cpu_online_mask;
		brokeaff = true;
	}

	/*
	 * Do not set the force argument of irq_do_set_affinity() as this
	 * disables the masking of offline CPUs from the supplied affinity
	 * mask and therefore might keep/reassign the irq to the outgoing
	 * CPU.
	 */
	err = irq_do_set_affinity(d, affinity, false);
	if (err) {
		pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
				    d->irq, err);
		brokeaff = false;
	}

	if (maskchip && chip->irq_unmask)
		chip->irq_unmask(d);

	return brokeaff;
}

/**
 * irq_migrate_all_off_this_cpu - Migrate irqs away from offline cpu
 *
 * The current CPU has been marked offline. Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void irq_migrate_all_off_this_cpu(void)
{
	struct irq_desc *desc;
	unsigned int irq;

	for_each_active_irq(irq) {
		bool affinity_broken;

		desc = irq_to_desc(irq);
		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken) {
			pr_warn_ratelimited("IRQ %u: no longer affine to CPU%u\n",
					    irq, smp_processor_id());
		}
	}
}
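
/*
 * Illustrative sketch (not part of this file): architecture code is
 * expected to call irq_migrate_all_off_this_cpu() late in its CPU-offline
 * path, with interrupts disabled on the dying CPU, roughly like:
 *
 *	int __cpu_disable(void)
 *	{
 *		...
 *		irq_migrate_all_off_this_cpu();
 *		return 0;
 *	}
 *
 * The exact call site and the surrounding teardown steps are arch
 * specific.
 */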

static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = irq_data_get_affinity_mask(data);

	if (!irqd_affinity_is_managed(data) || !desc->action ||
	    !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
		return;

	if (irqd_is_managed_and_shutdown(data)) {
		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		return;
	}

	/*
	 * If the interrupt can only be directed to a single target
	 * CPU then it is already assigned to a CPU in the affinity
	 * mask. No point in trying to move it around.
	 */
	if (!irqd_is_single_target(data))
		irq_set_affinity_locked(data, affinity, false);
}

/**
 * irq_affinity_online_cpu - Restore affinity for managed interrupts
 * @cpu:	Upcoming CPU for which interrupts should be restored
 */
int irq_affinity_online_cpu(unsigned int cpu)
{
	struct irq_desc *desc;
	unsigned int irq;

	irq_lock_sparse();
	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		raw_spin_lock_irq(&desc->lock);
		irq_restore_affinity_of_irq(desc, cpu);
		raw_spin_unlock_irq(&desc->lock);
	}
	irq_unlock_sparse();
	return 0;
}
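
/*
 * Illustrative sketch (not part of this file): irq_affinity_online_cpu()
 * runs as a CPU hotplug "online" callback. In the core hotplug state table
 * (kernel/cpu.c) the hookup looks roughly like:
 *
 *	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
 *		.name			= "irq/affinity:online",
 *		.startup.single		= irq_affinity_online_cpu,
 *		.teardown.single	= NULL,
 *	},
 *
 * so managed interrupts get their affinity restored whenever the upcoming
 * CPU reaches that hotplug state.
 */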