/*
 * irq.c: Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>

#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>

atomic_t irq_err_count;

/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;
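
/*
 * A platform registers its handler by assigning this pointer before
 * unmasking X86_PLATFORM_IPI_VECTOR.  Illustrative sketch only (the
 * handler name below is made up):
 *
 *	static void example_platform_ipi_handler(void)
 *	{
 *		... handle the platform-specific event ...
 *	}
 *
 *	x86_platform_ipi_callback = example_platform_ipi_handler;
 */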

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
	if (printk_ratelimit())
		pr_err("unexpected IRQ trap at vector %02x\n", irq);

	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 * But only ack when the APIC is enabled -AK
	 */
	ack_APIC_irq();
}

#define irq_stats(x)		(&per_cpu(irq_stat, x))
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_printf(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
	seq_printf(p, "  Local timer interrupts\n");
	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
	seq_printf(p, "  Spurious interrupts\n");
	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");
	seq_printf(p, "%*s: ", prec, "IWI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
	seq_printf(p, "  IRQ work interrupts\n");
	seq_printf(p, "%*s: ", prec, "RTR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
	seq_printf(p, "  APIC ICR read retries\n");
#endif
	if (x86_platform_ipi_callback) {
		seq_printf(p, "%*s: ", prec, "PLT");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
		seq_printf(p, "  Platform interrupts\n");
	}
#ifdef CONFIG_SMP
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_printf(p, "  Rescheduling interrupts\n");
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count -
					irq_stats(j)->irq_tlb_count);
	seq_printf(p, "  Function call interrupts\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_printf(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	seq_printf(p, "%*s: ", prec, "TRM");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
	seq_printf(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	seq_printf(p, "%*s: ", prec, "THR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
	seq_printf(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
	seq_printf(p, "  Machine check exceptions\n");
	seq_printf(p, "%*s: ", prec, "MCP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
	seq_printf(p, "  Machine check polls\n");
#endif
#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
	seq_printf(p, "%*s: ", prec, "HYP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
	seq_printf(p, "  Hypervisor callback interrupts\n");
#endif
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
	return 0;
}
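
/*
 * Illustrative tail of /proc/interrupts on a 2-cpu box (values made up):
 *
 *	NMI:          0          0   Non-maskable interrupts
 *	LOC:     123456      65432   Local timer interrupts
 *	...
 *	ERR:          0
 */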

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
	sum += irq_stats(cpu)->apic_timer_irqs;
	sum += irq_stats(cpu)->irq_spurious_count;
	sum += irq_stats(cpu)->apic_perf_irqs;
	sum += irq_stats(cpu)->apic_irq_work_irqs;
	sum += irq_stats(cpu)->icr_read_retry_count;
#endif
	if (x86_platform_ipi_callback)
		sum += irq_stats(cpu)->x86_platform_ipis;
#ifdef CONFIG_SMP
	sum += irq_stats(cpu)->irq_resched_count;
	sum += irq_stats(cpu)->irq_call_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	sum += irq_stats(cpu)->irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	sum += irq_stats(cpu)->irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE
	sum += per_cpu(mce_exception_count, cpu);
	sum += per_cpu(mce_poll_count, cpu);
#endif
	return sum;
}

u64 arch_irq_stat(void)
{
	u64 sum = atomic_read(&irq_err_count);
	return sum;
}
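
/*
 * Rough sketch of how fs/proc/stat.c consumes the two helpers above
 * when building the leading total of the "intr" line (paraphrased,
 * not a verbatim copy):
 *
 *	for_each_possible_cpu(i)
 *		sum += arch_irq_stat_cpu(i);
 *	sum += arch_irq_stat();
 */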

/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
__visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/* high bit used in ret_from_ code */
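	/*
	 * The entry stubs arrange for orig_ax to hold ~vector, so e.g.
	 * vector 0x31 arrives as ~0x31 and the complement below
	 * recovers 0x31.
	 */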
	unsigned vector = ~regs->orig_ax;
	unsigned irq;

	irq_enter();
	exit_idle();

	irq = __this_cpu_read(vector_irq[vector]);

	if (!handle_irq(irq, regs)) {
		ack_APIC_irq();

		if (irq != VECTOR_RETRIGGERED) {
			pr_emerg_ratelimited("%s: %d.%d No irq handler for vector (irq %d)\n",
					     __func__, smp_processor_id(),
					     vector, irq);
		} else {
			__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
		}
	}

	irq_exit();

	set_irq_regs(old_regs);
	return 1;
}

/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
void __smp_x86_platform_ipi(void)
{
	inc_irq_stat(x86_platform_ipis);

	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();
}

__visible void smp_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	__smp_x86_platform_ipi();
	exiting_irq();
	set_irq_regs(old_regs);
}

#ifdef CONFIG_HAVE_KVM
/*
 * Handler for POSTED_INTERRUPT_VECTOR.
 */
__visible void smp_kvm_posted_intr_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	ack_APIC_irq();
	irq_enter();
	exit_idle();
	inc_irq_stat(kvm_posted_intr_ipis);
	irq_exit();
	set_irq_regs(old_regs);
}
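
/*
 * Note: the handler body only acks and counts the IPI.  A posted
 * interrupt carries no work of its own here; the vector exists to
 * kick the CPU so pending posted interrupts get noticed (in guest
 * mode the CPU processes them in hardware without reaching this
 * handler).
 */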
#endif

__visible void smp_trace_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
	__smp_x86_platform_ipi();
	trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
	exiting_irq();
	set_irq_regs(old_regs);
}
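
/* vector_used_by_percpu_irq() itself is defined elsewhere (in irqinit.c) */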
EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);

#ifdef CONFIG_HOTPLUG_CPU
/* These two declarations are only used in check_irq_vectors_for_cpu_disable()
 * below, which is protected by stop_machine().  Putting them on the stack
 * results in a stack frame overflow.  Dynamically allocating could result in a
 * failure so declare these two cpumasks as global.
 */
static struct cpumask affinity_new, online_new;

/*
 * This cpu is going to be removed and its vectors migrated to the remaining
 * online cpus.  Check to see if there are enough vectors in the remaining cpus.
 * This function is protected by stop_machine().
 */
int check_irq_vectors_for_cpu_disable(void)
{
	int irq, cpu;
	unsigned int this_cpu, vector, this_count, count;
	struct irq_desc *desc;
	struct irq_data *data;

	this_cpu = smp_processor_id();
	cpumask_copy(&online_new, cpu_online_mask);
	cpu_clear(this_cpu, online_new);

	this_count = 0;
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		irq = __this_cpu_read(vector_irq[vector]);
		if (irq >= 0) {
			desc = irq_to_desc(irq);
			data = irq_desc_get_irq_data(desc);
			cpumask_copy(&affinity_new, data->affinity);
			cpu_clear(this_cpu, affinity_new);

			/* Do not count inactive or per-cpu irqs. */
			if (!irq_has_action(irq) || irqd_is_per_cpu(data))
				continue;

			/*
			 * A single irq may be mapped to multiple
			 * cpu's vector_irq[] (for example IOAPIC cluster
			 * mode).  In this case we have two
			 * possibilities:
			 *
			 * 1) the resulting affinity mask is empty; that is,
			 * the down'd cpu is the last cpu in the irq's
			 * affinity mask, or
			 *
			 * 2) the resulting affinity mask is no longer
			 * a subset of the online cpus but the affinity
			 * mask is not zero; that is, the down'd cpu is the
			 * last online cpu in a user set affinity mask.
			 */
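			/*
			 * Worked example of case 1 (illustrative): cpus
			 * {0,1} online, irq affinity = {1}, this_cpu = 1.
			 * Clearing cpu 1 leaves affinity_new empty, so
			 * this vector needs a new home and is counted.
			 */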
			if (cpumask_empty(&affinity_new) ||
			    !cpumask_subset(&affinity_new, &online_new))
				this_count++;
		}
	}

	count = 0;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
		     vector++) {
			if (per_cpu(vector_irq, cpu)[vector] < 0)
				count++;
		}
	}

	if (count < this_count) {
		pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n",
			this_cpu, this_count, count);
		return -ERANGE;
	}
	return 0;
}
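
/*
 * Rough sketch of the caller on the cpu-down path (paraphrased from
 * native_cpu_disable() in smpboot.c, not a verbatim copy):
 *
 *	int native_cpu_disable(void)
 *	{
 *		int ret;
 *
 *		ret = check_irq_vectors_for_cpu_disable();
 *		if (ret)
 *			return ret;
 *
 *		clear_local_APIC();
 *		cpu_disable_common();
 *		return 0;
 *	}
 */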

/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
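/* (Called from cpu_disable_common() on the same cpu-down path; see smpboot.c.) */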
void fixup_irqs(void)
{
	unsigned int irq, vector;
	static int warned;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;

	for_each_irq_desc(irq, desc) {
		int break_affinity = 0;
		int set_affinity = 1;
		const struct cpumask *affinity;

		if (!desc)
			continue;
		if (irq == 2)
			continue;

		/* interrupts are disabled at this point */
		raw_spin_lock(&desc->lock);

		data = irq_desc_get_irq_data(desc);
		affinity = data->affinity;
		if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
		    cpumask_subset(affinity, cpu_online_mask)) {
			raw_spin_unlock(&desc->lock);
			continue;
		}

		/*
		 * Complete the irq move.  This cpu is going down and for
		 * non intr-remapping case, we can't wait till this interrupt
		 * arrives at this cpu before completing the irq move.
		 */
		irq_force_complete_move(irq);

		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
			break_affinity = 1;
			affinity = cpu_online_mask;
		}

		chip = irq_data_get_irq_chip(data);
		if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
			chip->irq_mask(data);

		if (chip->irq_set_affinity)
			chip->irq_set_affinity(data, affinity, true);
		else if (!(warned++))
			set_affinity = 0;

		/*
		 * We unmask if the irq was not marked masked by the
		 * core code.  That respects the lazy irq disable
		 * behaviour.
		 */
		if (!irqd_can_move_in_process_context(data) &&
		    !irqd_irq_masked(data) && chip->irq_unmask)
			chip->irq_unmask(data);

		raw_spin_unlock(&desc->lock);

		if (break_affinity && set_affinity)
			pr_notice("Broke affinity for irq %i\n", irq);
		else if (!set_affinity)
			pr_notice("Cannot set affinity for irq %i\n", irq);
	}

	/*
	 * We can remove mdelay() and then send spurious interrupts to
	 * new cpu targets for all the irqs that were handled previously by
	 * this cpu.  While it works, I have seen spurious interrupt messages
	 * (nothing wrong but still...).
	 *
	 * So for now, retain mdelay(1) and check the IRR and then send those
	 * interrupts to new targets as this cpu is already offlined...
	 */
	mdelay(1);

	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irr;

		if (__this_cpu_read(vector_irq[vector]) <= VECTOR_UNDEFINED)
			continue;
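
		/*
		 * APIC_IRR is a bank of 32-bit registers spaced 0x10
		 * apart; e.g. vector 0x61 (97) is bit 97 % 32 = 1 of the
		 * register at APIC_IRR + (97 / 32) * 0x10 = APIC_IRR + 0x30.
		 */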
		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1 << (vector % 32))) {
			irq = __this_cpu_read(vector_irq[vector]);
			desc = irq_to_desc(irq);
			data = irq_desc_get_irq_data(desc);
			chip = irq_data_get_irq_chip(data);
			raw_spin_lock(&desc->lock);
			if (chip->irq_retrigger) {
				chip->irq_retrigger(data);
				__this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
			}
			raw_spin_unlock(&desc->lock);
		}
		if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
			__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
	}
}
#endif