vector.c

/*
 * Local APIC related interfaces to support IOAPIC, MSI, HT_IRQ etc.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *	Moved from arch/x86/kernel/apic/io_apic.c.
 * Jiang Liu <jiang.liu@linux.intel.com>
 *	Enable support of hierarchical irqdomains
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/irqdomain.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
#include <asm/irq_remapping.h>

struct apic_chip_data {
	struct irq_cfg		cfg;
	cpumask_var_t		domain;
	cpumask_var_t		old_domain;
	u8			move_in_progress : 1;
};

struct irq_domain *x86_vector_domain;
EXPORT_SYMBOL_GPL(x86_vector_domain);
static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_cpumask, vector_searchmask, searched_cpumask;
static struct irq_chip lapic_controller;
#ifdef CONFIG_X86_IO_APIC
static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
#endif

void lock_vector_lock(void)
{
	/* Used to ensure that the online set of cpus does not change
	 * during assign_irq_vector.
	 */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}

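/*
 * Descend the irqdomain hierarchy via parent_data to the irq_data of the
 * root (vector) domain and return the apic_chip_data stored there.
 */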
static struct apic_chip_data *apic_chip_data(struct irq_data *irq_data)
{
	if (!irq_data)
		return NULL;
	while (irq_data->parent_data)
		irq_data = irq_data->parent_data;
	return irq_data->chip_data;
}

struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
{
	struct apic_chip_data *data = apic_chip_data(irq_data);

	return data ? &data->cfg : NULL;
}
EXPORT_SYMBOL_GPL(irqd_cfg);

struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irqd_cfg(irq_get_irq_data(irq));
}

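/*
 * Allocate the per-interrupt APIC data together with its domain and
 * old_domain cpumasks on the given NUMA node.
 */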
static struct apic_chip_data *alloc_apic_chip_data(int node)
{
	struct apic_chip_data *data;

	data = kzalloc_node(sizeof(*data), GFP_KERNEL, node);
	if (!data)
		return NULL;
	if (!zalloc_cpumask_var_node(&data->domain, GFP_KERNEL, node))
		goto out_data;
	if (!zalloc_cpumask_var_node(&data->old_domain, GFP_KERNEL, node))
		goto out_domain;
	return data;
out_domain:
	free_cpumask_var(data->domain);
out_data:
	kfree(data);
	return NULL;
}

static void free_apic_chip_data(struct apic_chip_data *data)
{
	if (data) {
		free_cpumask_var(data->domain);
		free_cpumask_var(data->old_domain);
		kfree(data);
	}
}

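/*
 * Find a free vector for @irq on the CPUs in @mask and install the irq
 * descriptor in the per-cpu vector_irq tables. Must be called with
 * vector_lock held.
 */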
static int __assign_irq_vector(int irq, struct apic_chip_data *d,
			       const struct cpumask *mask,
			       struct irq_data *irqdata)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
	static int current_offset = VECTOR_OFFSET_START % 16;
	int cpu, vector;

	/*
	 * If there is still a move in progress or the previous move has not
	 * been cleaned up completely, tell the caller to come back later.
	 */
	if (d->move_in_progress ||
	    cpumask_intersects(d->old_domain, cpu_online_mask))
		return -EBUSY;

	/* Only try and allocate irqs on cpus that are present */
	cpumask_clear(d->old_domain);
	cpumask_clear(searched_cpumask);
	cpu = cpumask_first_and(mask, cpu_online_mask);
	while (cpu < nr_cpu_ids) {
		int new_cpu, offset;

		cpumask_copy(vector_cpumask, cpumask_of(cpu));
		/*
		 * Clear the offline cpus from @vector_cpumask for searching
		 * and verify whether the result overlaps with @mask. If true,
		 * then the call to apic->cpu_mask_to_apicid() will
		 * succeed as well. If not, no point in trying to find a
		 * vector in this mask.
		 */
		cpumask_and(vector_searchmask, vector_cpumask, cpu_online_mask);
		if (!cpumask_intersects(vector_searchmask, mask))
			goto next_cpu;

		if (cpumask_subset(vector_cpumask, d->domain)) {
			if (cpumask_equal(vector_cpumask, d->domain))
				goto success;
			/*
			 * Mark the cpus which are no longer in the mask for
			 * cleanup.
			 */
			cpumask_andnot(d->old_domain, d->domain, vector_cpumask);
			vector = d->cfg.vector;
			goto update;
		}

		vector = current_vector;
		offset = current_offset;
next:
		vector += 16;
		if (vector >= FIRST_SYSTEM_VECTOR) {
			offset = (offset + 1) % 16;
			vector = FIRST_EXTERNAL_VECTOR + offset;
		}

		/* If the search wrapped around, try the next cpu */
		if (unlikely(current_vector == vector))
			goto next_cpu;

		if (test_bit(vector, system_vectors))
			goto next;

		for_each_cpu(new_cpu, vector_searchmask) {
			if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
				goto next;
		}
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		/* Schedule the old vector for cleanup on all cpus */
		if (d->cfg.vector)
			cpumask_copy(d->old_domain, d->domain);
		for_each_cpu(new_cpu, vector_searchmask)
			per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
		goto update;

next_cpu:
		/*
		 * We exclude the current @vector_cpumask from the requested
		 * @mask and try again with the next online cpu in the
		 * result. We cannot modify @mask, so we use @vector_cpumask
		 * as a temporary buffer here as it will be reassigned at the
		 * top of the loop.
		 */
		cpumask_or(searched_cpumask, searched_cpumask, vector_cpumask);
		cpumask_andnot(vector_cpumask, mask, searched_cpumask);
		cpu = cpumask_first_and(vector_cpumask, cpu_online_mask);
		continue;
	}
	return -ENOSPC;

update:
	/*
	 * Exclude offline cpus from the cleanup mask and set the
	 * move_in_progress flag when the result is not empty.
	 */
	cpumask_and(d->old_domain, d->old_domain, cpu_online_mask);
	d->move_in_progress = !cpumask_empty(d->old_domain);
	d->cfg.old_vector = d->move_in_progress ? d->cfg.vector : 0;
	d->cfg.vector = vector;
	cpumask_copy(d->domain, vector_cpumask);
success:
	/*
	 * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
	 * as we already established that mask & d->domain & cpu_online_mask
	 * is not empty.
	 *
	 * vector_searchmask is a subset of d->domain and has the offline
	 * cpus masked out.
	 */
	cpumask_and(vector_searchmask, vector_searchmask, mask);
	BUG_ON(apic->cpu_mask_to_apicid(vector_searchmask, irqdata,
					&d->cfg.dest_apicid));
	return 0;
}

static int assign_irq_vector(int irq, struct apic_chip_data *data,
			     const struct cpumask *mask,
			     struct irq_data *irqdata)
{
	int err;
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, data, mask, irqdata);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return err;
}

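/*
 * Pick the target cpumask in order of preference: the mask supplied by the
 * caller, the cpus of the interrupt's NUMA node, then any online cpu.
 */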
static int assign_irq_vector_policy(int irq, int node,
				    struct apic_chip_data *data,
				    struct irq_alloc_info *info,
				    struct irq_data *irqdata)
{
	if (info && info->mask)
		return assign_irq_vector(irq, data, info->mask, irqdata);
	if (node != NUMA_NO_NODE &&
	    assign_irq_vector(irq, data, cpumask_of_node(node), irqdata) == 0)
		return 0;
	return assign_irq_vector(irq, data, cpu_online_mask, irqdata);
}

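/*
 * Release the vector assigned to @irq and drop the stale references to its
 * irq descriptor from the per-cpu vector_irq tables, including leftovers
 * from an unfinished move.
 */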
static void clear_irq_vector(int irq, struct apic_chip_data *data)
{
	struct irq_desc *desc;
	int cpu, vector;

	if (!data->cfg.vector)
		return;

	vector = data->cfg.vector;
	for_each_cpu_and(cpu, data->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;

	data->cfg.vector = 0;
	cpumask_clear(data->domain);

	/*
	 * If move is in progress or the old_domain mask is not empty,
	 * i.e. the cleanup IPI has not been processed yet, we need to remove
	 * the old references to desc from all cpus' vector tables.
	 */
	if (!data->move_in_progress && cpumask_empty(data->old_domain))
		return;

	desc = irq_to_desc(irq);
	for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
		     vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != desc)
				continue;
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
			break;
		}
	}
	data->move_in_progress = 0;
}

void init_irq_alloc_info(struct irq_alloc_info *info,
			 const struct cpumask *mask)
{
	memset(info, 0, sizeof(*info));
	info->mask = mask;
}

void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
	if (src)
		*dst = *src;
	else
		memset(dst, 0, sizeof(*dst));
}

static void x86_vector_free_irqs(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct apic_chip_data *apic_data;
	struct irq_data *irq_data;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
		if (irq_data && irq_data->chip_data) {
			raw_spin_lock_irqsave(&vector_lock, flags);
			clear_irq_vector(virq + i, irq_data->chip_data);
			apic_data = irq_data->chip_data;
			irq_domain_reset_irq_data(irq_data);
			raw_spin_unlock_irqrestore(&vector_lock, flags);
			free_apic_chip_data(apic_data);
#ifdef CONFIG_X86_IO_APIC
			if (virq + i < nr_legacy_irqs())
				legacy_irq_data[virq + i] = NULL;
#endif
		}
	}
}

static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct apic_chip_data *data;
	struct irq_data *irq_data;
	int i, err, node;

	if (disable_apic)
		return -ENXIO;

	/* Currently vector allocator can't guarantee contiguous allocations */
	if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
		return -ENOSYS;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		BUG_ON(!irq_data);
		node = irq_data_get_node(irq_data);
#ifdef CONFIG_X86_IO_APIC
		if (virq + i < nr_legacy_irqs() && legacy_irq_data[virq + i])
			data = legacy_irq_data[virq + i];
		else
#endif
			data = alloc_apic_chip_data(node);
		if (!data) {
			err = -ENOMEM;
			goto error;
		}

		irq_data->chip = &lapic_controller;
		irq_data->chip_data = data;
		irq_data->hwirq = virq + i;
		irqd_set_single_target(irq_data);
		err = assign_irq_vector_policy(virq + i, node, data, info,
					       irq_data);
		if (err)
			goto error;
	}

	return 0;

error:
	x86_vector_free_irqs(domain, virq, i + 1);
	return err;
}

static const struct irq_domain_ops x86_vector_domain_ops = {
	.alloc	= x86_vector_alloc_irqs,
	.free	= x86_vector_free_irqs,
};

int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
		nr_irqs = NR_VECTORS * nr_cpu_ids;

	nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
	/*
	 * for MSI and HT dyn irq
	 */
	if (gsi_top <= NR_IRQS_LEGACY)
		nr += 8 * nr_cpu_ids;
	else
		nr += gsi_top * 16;
#endif
	if (nr < nr_irqs)
		nr_irqs = nr;

	/*
	 * We don't know if PIC is present at this point so we need to do
	 * probe() to get the right number of legacy IRQs.
	 */
	return legacy_pic->probe();
}

#ifdef CONFIG_X86_IO_APIC
static void __init init_legacy_irqs(void)
{
	int i, node = cpu_to_node(0);
	struct apic_chip_data *data;

	/*
	 * For legacy IRQs, start with assigning irq0 to irq15 to
	 * ISA_IRQ_VECTOR(i) for all cpus.
	 */
	for (i = 0; i < nr_legacy_irqs(); i++) {
		data = legacy_irq_data[i] = alloc_apic_chip_data(node);
		BUG_ON(!data);

		data->cfg.vector = ISA_IRQ_VECTOR(i);
		cpumask_copy(data->domain, cpumask_of(0));
		irq_set_chip_data(i, data);
	}
}
#else
static inline void init_legacy_irqs(void) { }
#endif

int __init arch_early_irq_init(void)
{
	struct fwnode_handle *fn;

	init_legacy_irqs();

	fn = irq_domain_alloc_named_fwnode("VECTOR");
	BUG_ON(!fn);
	x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops,
						   NULL);
	BUG_ON(x86_vector_domain == NULL);
	irq_domain_free_fwnode(fn);
	irq_set_default_host(x86_vector_domain);

	arch_init_msi_domain(x86_vector_domain);
	arch_init_htirq_domain(x86_vector_domain);

	BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
	BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
	BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL));

	return arch_early_ioapic_init();
}

/* Initialize vector_irq on a new cpu */
static void __setup_vector_irq(int cpu)
{
	struct apic_chip_data *data;
	struct irq_desc *desc;
	int irq, vector;

	/* Mark the inuse vectors */
	for_each_irq_desc(irq, desc) {
		struct irq_data *idata = irq_desc_get_irq_data(desc);

		data = apic_chip_data(idata);
		if (!data || !cpumask_test_cpu(cpu, data->domain))
			continue;
		vector = data->cfg.vector;
		per_cpu(vector_irq, cpu)[vector] = desc;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		desc = per_cpu(vector_irq, cpu)[vector];
		if (IS_ERR_OR_NULL(desc))
			continue;

		data = apic_chip_data(irq_desc_get_irq_data(desc));
		if (!cpumask_test_cpu(cpu, data->domain))
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
	}
}

/*
 * Setup the vector to irq mappings. Must be called with vector_lock held.
 */
void setup_vector_irq(int cpu)
{
	int irq;

	lockdep_assert_held(&vector_lock);
	/*
	 * On most of the platforms, legacy PIC delivers the interrupts on the
	 * boot cpu. But there are certain platforms where PIC interrupts are
	 * delivered to multiple cpus. If the legacy IRQ is handled by the
	 * legacy PIC, for the new cpu that is coming online, setup the static
	 * legacy vector to irq mapping:
	 */
	for (irq = 0; irq < nr_legacy_irqs(); irq++)
		per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq_to_desc(irq);

	__setup_vector_irq(cpu);
}

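/*
 * Resend the interrupt by sending an IPI with its assigned vector to the
 * first online cpu in the interrupt's domain.
 */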
static int apic_retrigger_irq(struct irq_data *irq_data)
{
	struct apic_chip_data *data = apic_chip_data(irq_data);
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpu = cpumask_first_and(data->domain, cpu_online_mask);
	apic->send_IPI_mask(cpumask_of(cpu), data->cfg.vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}

void apic_ack_edge(struct irq_data *data)
{
	irq_complete_move(irqd_cfg(data));
	irq_move_irq(data);
	ack_APIC_irq();
}

static int apic_set_affinity(struct irq_data *irq_data,
			     const struct cpumask *dest, bool force)
{
	struct apic_chip_data *data = irq_data->chip_data;
	int err, irq = irq_data->irq;

	if (!IS_ENABLED(CONFIG_SMP))
		return -EPERM;

	if (!cpumask_intersects(dest, cpu_online_mask))
		return -EINVAL;

	err = assign_irq_vector(irq, data, dest, irq_data);
	return err ? err : IRQ_SET_MASK_OK;
}

static struct irq_chip lapic_controller = {
	.name			= "APIC",
	.irq_ack		= apic_ack_edge,
	.irq_set_affinity	= apic_set_affinity,
	.irq_retrigger		= apic_retrigger_irq,
};

#ifdef CONFIG_SMP
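/*
 * Send IRQ_MOVE_CLEANUP_VECTOR to the online cpus in old_domain so they
 * release the previously used vector of this interrupt.
 */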
static void __send_cleanup_vector(struct apic_chip_data *data)
{
	raw_spin_lock(&vector_lock);
	cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
	data->move_in_progress = 0;
	if (!cpumask_empty(data->old_domain))
		apic->send_IPI_mask(data->old_domain, IRQ_MOVE_CLEANUP_VECTOR);
	raw_spin_unlock(&vector_lock);
}

void send_cleanup_vector(struct irq_cfg *cfg)
{
	struct apic_chip_data *data;

	data = container_of(cfg, struct apic_chip_data, cfg);
	if (data->move_in_progress)
		__send_cleanup_vector(data);
}

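/*
 * Handler for IRQ_MOVE_CLEANUP_VECTOR: scan this cpu's vector_irq table and
 * release the vectors which belong to interrupts that have moved away from
 * this cpu.
 */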
asmlinkage __visible void __irq_entry smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;

	entering_ack_irq();

	/* Prevent vectors vanishing under us */
	raw_spin_lock(&vector_lock);

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		struct apic_chip_data *data;
		struct irq_desc *desc;
		unsigned int irr;

	retry:
		desc = __this_cpu_read(vector_irq[vector]);
		if (IS_ERR_OR_NULL(desc))
			continue;

		if (!raw_spin_trylock(&desc->lock)) {
			raw_spin_unlock(&vector_lock);
			cpu_relax();
			raw_spin_lock(&vector_lock);
			goto retry;
		}

		data = apic_chip_data(irq_desc_get_irq_data(desc));
		if (!data)
			goto unlock;

		/*
		 * Nothing to cleanup if irq migration is in progress
		 * or this cpu is not set in the cleanup mask.
		 */
		if (data->move_in_progress ||
		    !cpumask_test_cpu(me, data->old_domain))
			goto unlock;

		/*
		 * We have two cases to handle here:
		 * 1) vector is unchanged but the target mask got reduced
		 * 2) vector and the target mask have changed
		 *
		 * #1 is obvious, but in #2 we have two vectors with the same
		 * irq descriptor: the old and the new vector. So we need to
		 * make sure that we only cleanup the old vector. The new
		 * vector has the current @vector number in the config and
		 * this cpu is part of the target mask. We better leave that
		 * one alone.
		 */
		if (vector == data->cfg.vector &&
		    cpumask_test_cpu(me, data->domain))
			goto unlock;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		/*
		 * Check if the vector that needs to be cleaned up is
		 * registered at the cpu's IRR. If so, then this is not
		 * the best time to clean it up. Let's clean it up in the
		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to myself.
		 */
		if (irr & (1 << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			goto unlock;
		}
		__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
		cpumask_clear_cpu(me, data->old_domain);
unlock:
		raw_spin_unlock(&desc->lock);
	}

	raw_spin_unlock(&vector_lock);

	exiting_irq();
}

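/*
 * Called from the interrupt entry path (via apic_ack_edge) with the vector
 * that just fired. Once the interrupt has arrived on the new vector on a
 * cpu in the new domain, the old vector can be scheduled for cleanup.
 */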
static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
	unsigned me;
	struct apic_chip_data *data;

	data = container_of(cfg, struct apic_chip_data, cfg);
	if (likely(!data->move_in_progress))
		return;

	me = smp_processor_id();
	if (vector == data->cfg.vector && cpumask_test_cpu(me, data->domain))
		__send_cleanup_vector(data);
}

void irq_complete_move(struct irq_cfg *cfg)
{
	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}

/*
 * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
 */
void irq_force_complete_move(struct irq_desc *desc)
{
	struct irq_data *irqdata;
	struct apic_chip_data *data;
	struct irq_cfg *cfg;
	unsigned int cpu;

	/*
	 * The function is called for all descriptors regardless of which
	 * irqdomain they belong to. For example if an IRQ is provided by
	 * an irq_chip as part of a GPIO driver, the chip data for that
	 * descriptor is specific to the irq_chip in question.
	 *
	 * Check first that the chip_data is what we expect
	 * (apic_chip_data) before touching it any further.
	 */
	irqdata = irq_domain_get_irq_data(x86_vector_domain,
					  irq_desc_get_irq(desc));
	if (!irqdata)
		return;

	data = apic_chip_data(irqdata);
	cfg = data ? &data->cfg : NULL;

	if (!cfg)
		return;

	/*
	 * This is tricky. If the cleanup of @data->old_domain has not been
	 * done yet, then the following setaffinity call will fail with
	 * -EBUSY. This can leave the interrupt in a stale state.
	 *
	 * All CPUs are stuck in stop machine with interrupts disabled so
	 * calling __irq_complete_move() would be completely pointless.
	 */
	raw_spin_lock(&vector_lock);
	/*
	 * Clean out all offline cpus (including the outgoing one) from the
	 * old_domain mask.
	 */
	cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);

	/*
	 * If move_in_progress is cleared and the old_domain mask is empty,
	 * then there is nothing to cleanup. fixup_irqs() will take care of
	 * the stale vectors on the outgoing cpu.
	 */
	if (!data->move_in_progress && cpumask_empty(data->old_domain)) {
		raw_spin_unlock(&vector_lock);
		return;
	}

	/*
	 * 1) The interrupt is in move_in_progress state. That means that we
	 *    have not seen an interrupt since the io_apic was reprogrammed to
	 *    the new vector.
	 *
	 * 2) The interrupt has fired on the new vector, but the cleanup IPIs
	 *    have not been processed yet.
	 */
	if (data->move_in_progress) {
		/*
		 * In theory there is a race:
		 *
		 * set_ioapic(new_vector) <-- Interrupt is raised before update
		 *			      is effective, i.e. it's raised on
		 *			      the old vector.
		 *
		 * So if the target cpu cannot handle that interrupt before
		 * the old vector is cleaned up, we get a spurious interrupt
		 * and in the worst case the ioapic irq line becomes stale.
		 *
		 * But in case of cpu hotplug this should be a non issue
		 * because if the affinity update happens right before all
		 * cpus rendezvous in stop machine, there is no way that the
		 * interrupt can be blocked on the target cpu because all cpus
		 * loop first with interrupts enabled in stop machine, so the
		 * old vector is not yet cleaned up when the interrupt fires.
		 *
		 * So the only way to run into this issue is if the delivery
		 * of the interrupt on the apic/system bus would be delayed
		 * beyond the point where the target cpu disables interrupts
		 * in stop machine. I doubt that it can happen, but at least
		 * there is a theoretical chance. Virtualization might be
		 * able to expose this, but AFAICT the IOAPIC emulation is not
		 * as stupid as the real hardware.
		 *
		 * Anyway, there is nothing we can do about that at this point
		 * w/o refactoring the whole fixup_irq() business completely.
		 * We print at least the irq number and the old vector number,
		 * so we have the necessary information when a problem in that
		 * area arises.
		 */
		pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
			irqdata->irq, cfg->old_vector);
	}
	/*
	 * If old_domain is not empty, then other cpus still have the irq
	 * descriptor set in their vector array. Clean it up.
	 */
	for_each_cpu(cpu, data->old_domain)
		per_cpu(vector_irq, cpu)[cfg->old_vector] = VECTOR_UNUSED;

	/* Cleanup the left overs of the (half finished) move */
	cpumask_clear(data->old_domain);
	data->move_in_progress = 0;
	raw_spin_unlock(&vector_lock);
}
#endif

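/*
 * Dump one 256-bit APIC register array (ISR, TMR or IRR): eight 32-bit
 * registers spaced 0x10 apart.
 */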
static void __init print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		pr_cont("%08x", apic_read(base + i*0x10));

	pr_cont("\n");
}

static void __init print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	pr_debug("printing local APIC contents on CPU#%d/%d:\n",
		 smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	pr_info("... APIC ID: %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	pr_info("... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			pr_debug("... APIC ARBPRI: %08x (%02x)\n",
				 v, v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		pr_debug("... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		pr_debug("... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	pr_debug("... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		pr_debug("... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	pr_debug("... APIC SPIV: %08x\n", v);

	pr_debug("... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	pr_debug("... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	pr_debug("... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		pr_debug("... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	pr_debug("... APIC ICR: %08x\n", (u32)icr);
	pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	pr_debug("... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		pr_debug("... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	pr_debug("... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	pr_debug("... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		pr_debug("... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	pr_debug("... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	pr_debug("... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	pr_debug("... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		pr_debug("... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		pr_debug("... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			pr_debug("... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}

static void __init print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}

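/*
 * Dump the legacy 8259A PIC registers: IMR, IRR, ISR and the ELCR
 * edge/level control registers.
 */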
static void __init print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!nr_legacy_irqs())
		return;

	pr_debug("\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	pr_debug("... PIC IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	pr_debug("... PIC IRR: %04x\n", v);

	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	pr_debug("... PIC ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	pr_debug("... PIC ELCR: %04x\n", v);
}

static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);

static int __init print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}

late_initcall(print_ICs);