vector.c

/*
 * Local APIC related interfaces to support IOAPIC, MSI, HT_IRQ etc.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 * Moved from arch/x86/kernel/apic/io_apic.c.
 * Jiang Liu <jiang.liu@linux.intel.com>
 *	Enable support of hierarchical irqdomains
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
#include <asm/irq_remapping.h>

struct irq_domain *x86_vector_domain;
static DEFINE_RAW_SPINLOCK(vector_lock);
static struct irq_chip lapic_controller;
#ifdef CONFIG_X86_IO_APIC
static struct irq_cfg *legacy_irq_cfgs[NR_IRQS_LEGACY];
#endif

void lock_vector_lock(void)
{
	/* Used so that the online set of cpus does not change
	 * during assign_irq_vector.
	 */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}

struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irqd_cfg(irq_get_irq_data(irq));
}

struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
{
	if (!irq_data)
		return NULL;
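	/*
	 * The irq_cfg lives in the chip_data of the irq_data that belongs
	 * to the vector domain, the root of the irqdomain hierarchy, so
	 * follow the parent_data links until there is no parent left.
	 */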
	while (irq_data->parent_data)
		irq_data = irq_data->parent_data;
	return irq_data->chip_data;
}

static struct irq_cfg *alloc_irq_cfg(int node)
{
	struct irq_cfg *cfg;

	cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
	if (!cfg)
		return NULL;
	if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
		goto out_cfg;
	if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
		goto out_domain;
	return cfg;
out_domain:
	free_cpumask_var(cfg->domain);
out_cfg:
	kfree(cfg);
	return NULL;
}

struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
{
	int res = irq_alloc_desc_at(at, node);
	struct irq_cfg *cfg;

	if (res < 0) {
		if (res != -EEXIST)
			return NULL;
		cfg = irq_cfg(at);
		if (cfg)
			return cfg;
	}

	cfg = alloc_irq_cfg(node);
	if (cfg)
		irq_set_chip_data(at, cfg);
	else
		irq_free_desc(at);
	return cfg;
}

static void free_irq_cfg(struct irq_cfg *cfg)
{
	if (cfg) {
		free_cpumask_var(cfg->domain);
		free_cpumask_var(cfg->old_domain);
		kfree(cfg);
	}
}

static int
__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
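	/*
	 * For example: vector 0x31 sits in priority level 0x31 >> 4 = 3
	 * and vector 0x41 in level 4, so stepping the search below by 16
	 * changes the level on every step, while the low nibble (the
	 * offset) only rotates once the search wraps past
	 * first_system_vector.
	 */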
	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
	static int current_offset = VECTOR_OFFSET_START % 16;
	int cpu, err;
	cpumask_var_t tmp_mask;

	if (cfg->move_in_progress)
		return -EBUSY;

	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
		return -ENOMEM;

	/* Only try and allocate irqs on cpus that are present */
	err = -ENOSPC;
	cpumask_clear(cfg->old_domain);
	cpu = cpumask_first_and(mask, cpu_online_mask);
	while (cpu < nr_cpu_ids) {
		int new_cpu, vector, offset;

		apic->vector_allocation_domain(cpu, tmp_mask, mask);

		if (cpumask_subset(tmp_mask, cfg->domain)) {
			err = 0;
			if (cpumask_equal(tmp_mask, cfg->domain))
				break;
			/*
			 * New cpumask using the vector is a proper subset of
			 * the current in use mask. So cleanup the vector
			 * allocation for the members that are not used anymore.
			 */
			cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
			cfg->move_in_progress =
			   cpumask_intersects(cfg->old_domain, cpu_online_mask);
			cpumask_and(cfg->domain, cfg->domain, tmp_mask);
			break;
		}

		vector = current_vector;
		offset = current_offset;
next:
		vector += 16;
		if (vector >= first_system_vector) {
			offset = (offset + 1) % 16;
			vector = FIRST_EXTERNAL_VECTOR + offset;
		}

		if (unlikely(current_vector == vector)) {
			cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask);
			cpumask_andnot(tmp_mask, mask, cfg->old_domain);
			cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
			continue;
		}

		if (test_bit(vector, used_vectors))
			goto next;

		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) {
			if (per_cpu(vector_irq, new_cpu)[vector] >
			    VECTOR_UNDEFINED)
				goto next;
		}
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (cfg->vector) {
			cpumask_copy(cfg->old_domain, cfg->domain);
			cfg->move_in_progress =
			   cpumask_intersects(cfg->old_domain, cpu_online_mask);
		}
		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		cfg->vector = vector;
		cpumask_copy(cfg->domain, tmp_mask);
		err = 0;
		break;
	}
	free_cpumask_var(tmp_mask);

	if (!err) {
		/* cache destination APIC IDs into cfg->dest_apicid */
		err = apic->cpu_mask_to_apicid_and(mask, cfg->domain,
						   &cfg->dest_apicid);
	}

	return err;
}

int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	int err;
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, cfg, mask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return err;
}

void clear_irq_vector(int irq, struct irq_cfg *cfg)
{
	int cpu, vector;
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	BUG_ON(!cfg->vector);

	vector = cfg->vector;
	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;

	cfg->vector = 0;
	cpumask_clear(cfg->domain);

	if (likely(!cfg->move_in_progress)) {
		raw_spin_unlock_irqrestore(&vector_lock, flags);
		return;
	}

	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
		     vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != irq)
				continue;
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
			break;
		}
	}
	cfg->move_in_progress = 0;
	raw_spin_unlock_irqrestore(&vector_lock, flags);
}

void init_irq_alloc_info(struct irq_alloc_info *info,
			 const struct cpumask *mask)
{
	memset(info, 0, sizeof(*info));
	info->mask = mask;
}

void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
	if (src)
		*dst = *src;
	else
		memset(dst, 0, sizeof(*dst));
}

static inline const struct cpumask *
irq_alloc_info_get_mask(struct irq_alloc_info *info)
{
	return (!info || !info->mask) ? apic->target_cpus() : info->mask;
}

static void x86_vector_free_irqs(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
		if (irq_data && irq_data->chip_data) {
			clear_irq_vector(virq + i, irq_data->chip_data);
			free_irq_cfg(irq_data->chip_data);
#ifdef CONFIG_X86_IO_APIC
			if (virq + i < nr_legacy_irqs())
				legacy_irq_cfgs[virq + i] = NULL;
#endif
			irq_domain_reset_irq_data(irq_data);
		}
	}
}

static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	const struct cpumask *mask;
	struct irq_data *irq_data;
	struct irq_cfg *cfg;
	int i, err;

	if (disable_apic)
		return -ENXIO;

	/* Currently vector allocator can't guarantee contiguous allocations */
	if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
		return -ENOSYS;

	mask = irq_alloc_info_get_mask(info);
	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		BUG_ON(!irq_data);
#ifdef CONFIG_X86_IO_APIC
		if (virq + i < nr_legacy_irqs() && legacy_irq_cfgs[virq + i])
			cfg = legacy_irq_cfgs[virq + i];
		else
#endif
			cfg = alloc_irq_cfg(irq_data->node);
		if (!cfg) {
			err = -ENOMEM;
			goto error;
		}

		irq_data->chip = &lapic_controller;
		irq_data->chip_data = cfg;
		irq_data->hwirq = virq + i;
		err = assign_irq_vector(virq + i, cfg, mask);
		if (err)
			goto error;
	}

	return 0;

error:
	x86_vector_free_irqs(domain, virq, i + 1);
	return err;
}

static struct irq_domain_ops x86_vector_domain_ops = {
	.alloc = x86_vector_alloc_irqs,
	.free = x86_vector_free_irqs,
};

int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
		nr_irqs = NR_VECTORS * nr_cpu_ids;

	nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
	/*
	 * for MSI and HT dyn irq
	 */
	if (gsi_top <= NR_IRQS_LEGACY)
		nr += 8 * nr_cpu_ids;
	else
		nr += gsi_top * 16;
#endif
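	/*
	 * Worked example with illustrative numbers: on a 4-CPU system with
	 * gsi_top = 24 and 16 legacy irqs, nr starts at (24 + 16) + 8 * 4
	 * = 72; since gsi_top exceeds NR_IRQS_LEGACY, another 24 * 16 = 384
	 * entries are reserved for MSI/HT, giving nr = 456.
	 */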
	if (nr < nr_irqs)
		nr_irqs = nr;

	return nr_legacy_irqs();
}

#ifdef CONFIG_X86_IO_APIC
static void init_legacy_irqs(void)
{
	int i, node = cpu_to_node(0);
	struct irq_cfg *cfg;

	/*
	 * For legacy IRQ's, start with assigning irq0 to irq15 to
	 * IRQ0_VECTOR to IRQ15_VECTOR for all cpu's.
	 */
	for (i = 0; i < nr_legacy_irqs(); i++) {
		cfg = legacy_irq_cfgs[i] = alloc_irq_cfg(node);
		BUG_ON(!cfg);
		cfg->vector = IRQ0_VECTOR + i;
		cpumask_setall(cfg->domain);
		irq_set_chip_data(i, cfg);
	}
}
#else
static void init_legacy_irqs(void) { }
#endif

int __init arch_early_irq_init(void)
{
	init_legacy_irqs();

	x86_vector_domain = irq_domain_add_tree(NULL, &x86_vector_domain_ops,
						NULL);
	BUG_ON(x86_vector_domain == NULL);
	irq_set_default_host(x86_vector_domain);

	arch_init_msi_domain(x86_vector_domain);
	arch_init_htirq_domain(x86_vector_domain);

	return arch_early_ioapic_init();
}

static void __setup_vector_irq(int cpu)
{
	/* Initialize vector_irq on a new cpu */
	int irq, vector;
	struct irq_cfg *cfg;

	/*
	 * vector_lock will make sure that we don't run into irq vector
	 * assignments that might be happening on another cpu in parallel,
	 * while we setup our initial vector to irq mappings.
	 */
	raw_spin_lock(&vector_lock);
	/* Mark the inuse vectors */
	for_each_active_irq(irq) {
		cfg = irq_cfg(irq);
		if (!cfg)
			continue;

		if (!cpumask_test_cpu(cpu, cfg->domain))
			continue;
		vector = cfg->vector;
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
	/* Mark the free vectors */
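	/*
	 * Second pass: release any vector whose recorded irq no longer
	 * includes this CPU in its domain; such entries are stale and the
	 * vector can be reused here.
	 */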
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		irq = per_cpu(vector_irq, cpu)[vector];
		if (irq <= VECTOR_UNDEFINED)
			continue;

		cfg = irq_cfg(irq);
		if (!cpumask_test_cpu(cpu, cfg->domain))
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
	}
	raw_spin_unlock(&vector_lock);
}

/*
 * Setup the vector to irq mappings.
 */
void setup_vector_irq(int cpu)
{
	int irq;

	/*
	 * On most of the platforms, legacy PIC delivers the interrupts on the
	 * boot cpu. But there are certain platforms where PIC interrupts are
	 * delivered to multiple cpu's. If the legacy IRQ is handled by the
	 * legacy PIC, for the new cpu that is coming online, setup the static
	 * legacy vector to irq mapping:
	 */
	for (irq = 0; irq < nr_legacy_irqs(); irq++)
		per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq;

	__setup_vector_irq(cpu);
}

int apic_retrigger_irq(struct irq_data *data)
{
	struct irq_cfg *cfg = irqd_cfg(data);
	unsigned long flags;
	int cpu;

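	/*
	 * Re-raise the interrupt by sending its vector as an IPI to one
	 * online CPU that is currently part of the irq's vector domain.
	 */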
	raw_spin_lock_irqsave(&vector_lock, flags);
	cpu = cpumask_first_and(cfg->domain, cpu_online_mask);
	apic->send_IPI_mask(cpumask_of(cpu), cfg->vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}

void apic_ack_edge(struct irq_data *data)
{
	irq_complete_move(irqd_cfg(data));
	irq_move_irq(data);
	ack_APIC_irq();
}

/*
 * Either sets data->affinity to a valid value and returns
 * ->cpu_mask_to_apicid of that in dest_id, or returns a negative error
 * code and leaves data->affinity untouched.
 */
int apic_set_affinity(struct irq_data *data, const struct cpumask *mask,
		      unsigned int *dest_id)
{
	struct irq_cfg *cfg = irqd_cfg(data);
	unsigned int irq = data->irq;
	int err;

	if (!config_enabled(CONFIG_SMP))
		return -EPERM;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -EINVAL;

	err = assign_irq_vector(irq, cfg, mask);
	if (err)
		return err;

	err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id);
	if (err) {
		if (assign_irq_vector(irq, cfg, data->affinity))
			pr_err("Failed to recover vector for irq %d\n", irq);
		return err;
	}

	cpumask_copy(data->affinity, mask);

	return 0;
}

static int vector_set_affinity(struct irq_data *irq_data,
			       const struct cpumask *dest, bool force)
{
	struct irq_cfg *cfg = irq_data->chip_data;
	int err, irq = irq_data->irq;

	if (!config_enabled(CONFIG_SMP))
		return -EPERM;

	if (!cpumask_intersects(dest, cpu_online_mask))
		return -EINVAL;

	err = assign_irq_vector(irq, cfg, dest);
	if (err) {
		struct irq_data *top = irq_get_irq_data(irq);

		if (assign_irq_vector(irq, cfg, top->affinity))
			pr_err("Failed to recover vector for irq %d\n", irq);
		return err;
	}

	return IRQ_SET_MASK_OK;
}

static struct irq_chip lapic_controller = {
	.irq_ack		= apic_ack_edge,
	.irq_set_affinity	= vector_set_affinity,
	.irq_retrigger		= apic_retrigger_irq,
};

#ifdef CONFIG_SMP
void send_cleanup_vector(struct irq_cfg *cfg)
{
	cpumask_var_t cleanup_mask;

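	/*
	 * Prefer a single IPI to the whole set of CPUs that used to service
	 * this irq; if the temporary cpumask cannot be allocated atomically,
	 * fall back to one IPI per CPU.
	 */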
	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
		unsigned int i;

		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
			apic->send_IPI_mask(cpumask_of(i),
					    IRQ_MOVE_CLEANUP_VECTOR);
	} else {
		cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
		apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		free_cpumask_var(cleanup_mask);
	}
	cfg->move_in_progress = 0;
}

asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;

	ack_APIC_irq();
	irq_enter();
	exit_idle();

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		int irq;
		unsigned int irr;
		struct irq_desc *desc;
		struct irq_cfg *cfg;

		irq = __this_cpu_read(vector_irq[vector]);

		if (irq <= VECTOR_UNDEFINED)
			continue;

		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		cfg = irq_cfg(irq);
		if (!cfg)
			continue;

		raw_spin_lock(&desc->lock);

		/*
		 * Check if the irq migration is in progress. If so, we
		 * haven't received the cleanup request yet for this irq.
		 */
		if (cfg->move_in_progress)
			goto unlock;

		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
			goto unlock;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));

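		/*
		 * Each IRR register covers 32 vectors and consecutive
		 * registers are 0x10 apart in the APIC register space, so
		 * vector / 32 * 0x10 selects the register and vector % 32
		 * the bit within it.
		 */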
		/*
		 * Check if the vector that needs to be cleaned up is
		 * registered at the cpu's IRR. If so, then this is not
		 * the best time to clean it up. Let's clean it up in the
		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to myself.
		 */
		if (irr & (1 << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			goto unlock;
		}
		__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
unlock:
		raw_spin_unlock(&desc->lock);
	}

	irq_exit();
}

static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
	unsigned me;

	if (likely(!cfg->move_in_progress))
		return;

	me = smp_processor_id();
	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
		send_cleanup_vector(cfg);
}

void irq_complete_move(struct irq_cfg *cfg)
{
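	/*
	 * The interrupt entry code leaves the vector number one's-complemented
	 * in orig_ax, so ~orig_ax recovers the vector currently being
	 * serviced.
	 */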
	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}

void irq_force_complete_move(int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);

	if (!cfg)
		return;

	__irq_complete_move(cfg, cfg->vector);
}
#endif

static void __init print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		pr_cont("%08x", apic_read(base + i*0x10));

	pr_cont("\n");
}

static void __init print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	pr_debug("printing local APIC contents on CPU#%d/%d:\n",
		 smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	pr_info("... APIC ID: %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	pr_info("... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			pr_debug("... APIC ARBPRI: %08x (%02x)\n",
				 v, v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		pr_debug("... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		pr_debug("... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	pr_debug("... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		pr_debug("... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	pr_debug("... APIC SPIV: %08x\n", v);

	pr_debug("... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	pr_debug("... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	pr_debug("... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		pr_debug("... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	pr_debug("... APIC ICR: %08x\n", (u32)icr);
	pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	pr_debug("... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		pr_debug("... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	pr_debug("... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	pr_debug("... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		pr_debug("... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	pr_debug("... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	pr_debug("... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	pr_debug("... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		pr_debug("... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		pr_debug("... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			pr_debug("... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}

static void __init print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}

static void __init print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!nr_legacy_irqs())
		return;

	pr_debug("\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	pr_debug("... PIC IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	pr_debug("... PIC IRR: %04x\n", v);

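	/*
	 * OCW3 command 0x0b switches the next read of port 0x20/0xa0 to
	 * return the ISR instead of the default IRR; 0x0a switches it back
	 * afterwards.
	 */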
	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	pr_debug("... PIC ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	pr_debug("... PIC ELCR: %04x\n", v);
}

static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);

static int __init print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!cpu_has_apic && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}

late_initcall(print_ICs);