vector.c

/*
 * Local APIC related interfaces to support IOAPIC, MSI, etc.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *	Moved from arch/x86/kernel/apic/io_apic.c.
 * Jiang Liu <jiang.liu@linux.intel.com>
 *	Enable support of hierarchical irqdomains
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/irqdomain.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
#include <asm/irq_remapping.h>

#include <asm/trace/irq_vectors.h>

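/*
 * Per-interrupt state for the vector domain: the currently programmed
 * vector/CPU pair, the previous pair while a move is pending, and the
 * flags which drive reservation and managed-interrupt handling.
 */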
struct apic_chip_data {
	struct irq_cfg		hw_irq_cfg;
	unsigned int		vector;
	unsigned int		prev_vector;
	unsigned int		cpu;
	unsigned int		prev_cpu;
	unsigned int		irq;
	struct hlist_node	clist;
	unsigned int		move_in_progress	: 1,
				is_managed		: 1,
				can_reserve		: 1,
				has_reserved		: 1;
};

struct irq_domain *x86_vector_domain;
EXPORT_SYMBOL_GPL(x86_vector_domain);
static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_searchmask;
static struct irq_chip lapic_controller;
static struct irq_matrix *vector_matrix;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct hlist_head, cleanup_list);
#endif

void lock_vector_lock(void)
{
	/*
	 * Used so that the online set of CPUs does not change
	 * during assign_irq_vector().
	 */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}

void init_irq_alloc_info(struct irq_alloc_info *info,
			 const struct cpumask *mask)
{
	memset(info, 0, sizeof(*info));
	info->mask = mask;
}

void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
	if (src)
		*dst = *src;
	else
		memset(dst, 0, sizeof(*dst));
}

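/*
 * Walk up the irqdomain hierarchy to the root irq_data, which belongs to
 * the vector domain, and return its chip data.
 */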
static struct apic_chip_data *apic_chip_data(struct irq_data *irqd)
{
	if (!irqd)
		return NULL;

	while (irqd->parent_data)
		irqd = irqd->parent_data;

	return irqd->chip_data;
}

struct irq_cfg *irqd_cfg(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	return apicd ? &apicd->hw_irq_cfg : NULL;
}
EXPORT_SYMBOL_GPL(irqd_cfg);

struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irqd_cfg(irq_get_irq_data(irq));
}

static struct apic_chip_data *alloc_apic_chip_data(int node)
{
	struct apic_chip_data *apicd;

	apicd = kzalloc_node(sizeof(*apicd), GFP_KERNEL, node);
	if (apicd)
		INIT_HLIST_NODE(&apicd->clist);
	return apicd;
}

static void free_apic_chip_data(struct apic_chip_data *apicd)
{
	kfree(apicd);
}

static void apic_update_irq_cfg(struct irq_data *irqd, unsigned int vector,
				unsigned int cpu)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	lockdep_assert_held(&vector_lock);

	apicd->hw_irq_cfg.vector = vector;
	apicd->hw_irq_cfg.dest_apicid = apic->calc_dest_apicid(cpu);
	irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
	trace_vector_config(irqd->irq, vector, cpu,
			    apicd->hw_irq_cfg.dest_apicid);
}

static void apic_update_vector(struct irq_data *irqd, unsigned int newvec,
			       unsigned int newcpu)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	struct irq_desc *desc = irq_data_to_desc(irqd);
	bool managed = irqd_affinity_is_managed(irqd);

	lockdep_assert_held(&vector_lock);

	trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector,
			    apicd->cpu);

	/*
	 * If there is no vector associated or if the associated vector is
	 * the shutdown vector, which is associated to make PCI/MSI
	 * shutdown mode work, then there is nothing to release. Clear out
	 * prev_vector for this and the offlined target case.
	 */
	apicd->prev_vector = 0;
	if (!apicd->vector || apicd->vector == MANAGED_IRQ_SHUTDOWN_VECTOR)
		goto setnew;
	/*
	 * If the target CPU of the previous vector is online, then mark
	 * the vector as move in progress and store it for cleanup when the
	 * first interrupt on the new vector arrives. If the target CPU is
	 * offline then the regular release mechanism via the cleanup
	 * vector is not possible and the vector can be immediately freed
	 * in the underlying matrix allocator.
	 */
	if (cpu_online(apicd->cpu)) {
		apicd->move_in_progress = true;
		apicd->prev_vector = apicd->vector;
		apicd->prev_cpu = apicd->cpu;
	} else {
		irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector,
				managed);
	}

setnew:
	apicd->vector = newvec;
	apicd->cpu = newcpu;
	BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec]));
	per_cpu(vector_irq, newcpu)[newvec] = desc;
}

static void vector_assign_managed_shutdown(struct irq_data *irqd)
{
	unsigned int cpu = cpumask_first(cpu_online_mask);

	apic_update_irq_cfg(irqd, MANAGED_IRQ_SHUTDOWN_VECTOR, cpu);
}

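/*
 * Managed interrupts get a vector reserved on each CPU in the affinity
 * mask up front, so that activation and CPU hotplug are not supposed to
 * run out of vectors for them later.
 */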
static int reserve_managed_vector(struct irq_data *irqd)
{
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&vector_lock, flags);
	apicd->is_managed = true;
	ret = irq_matrix_reserve_managed(vector_matrix, affmsk);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	trace_vector_reserve_managed(irqd->irq, ret);
	return ret;
}

static void reserve_irq_vector_locked(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	irq_matrix_reserve(vector_matrix);
	apicd->can_reserve = true;
	apicd->has_reserved = true;
	irqd_set_can_reserve(irqd);
	trace_vector_reserve(irqd->irq, 0);
	vector_assign_managed_shutdown(irqd);
}

static int reserve_irq_vector(struct irq_data *irqd)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	reserve_irq_vector_locked(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return 0;
}

static int
assign_vector_locked(struct irq_data *irqd, const struct cpumask *dest)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	bool resvd = apicd->has_reserved;
	unsigned int cpu = apicd->cpu;
	int vector = apicd->vector;

	lockdep_assert_held(&vector_lock);

	/*
	 * If the current target CPU is online and in the new requested
	 * affinity mask, there is no point in moving the interrupt from
	 * one CPU to another.
	 */
	if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest))
		return 0;

	/*
	 * Careful here. @apicd might either have move_in_progress set or
	 * be enqueued for cleanup. Assigning a new vector would either
	 * leave a stale vector on some CPU around or in case of a pending
	 * cleanup corrupt the hlist.
	 */
	if (apicd->move_in_progress || !hlist_unhashed(&apicd->clist))
		return -EBUSY;

	vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
	trace_vector_alloc(irqd->irq, vector, resvd, vector);
	if (vector < 0)
		return vector;
	apic_update_vector(irqd, vector, cpu);
	apic_update_irq_cfg(irqd, vector, cpu);

	return 0;
}

static int assign_irq_vector(struct irq_data *irqd, const struct cpumask *dest)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	ret = assign_vector_locked(irqd, vector_searchmask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

static int assign_irq_vector_any_locked(struct irq_data *irqd)
{
	/* Get the affinity mask - either irq_default_affinity or (user) set */
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	int node = irq_data_get_node(irqd);

	if (node == NUMA_NO_NODE)
		goto all;
	/* Try the intersection of @affmsk and node mask */
	cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
	if (!assign_vector_locked(irqd, vector_searchmask))
		return 0;
	/* Try the node mask */
	if (!assign_vector_locked(irqd, cpumask_of_node(node)))
		return 0;
all:
	/* Try the full affinity mask */
	cpumask_and(vector_searchmask, affmsk, cpu_online_mask);
	if (!assign_vector_locked(irqd, vector_searchmask))
		return 0;
	/* Try the full online mask */
	return assign_vector_locked(irqd, cpu_online_mask);
}

static int
assign_irq_vector_policy(struct irq_data *irqd, struct irq_alloc_info *info)
{
	if (irqd_affinity_is_managed(irqd))
		return reserve_managed_vector(irqd);
	if (info->mask)
		return assign_irq_vector(irqd, info->mask);
	/*
	 * Make only a global reservation with no guarantee. A real vector
	 * is associated at activation time.
	 */
	return reserve_irq_vector(irqd);
}

static int
assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
{
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int vector, cpu;

	cpumask_and(vector_searchmask, vector_searchmask, affmsk);
	cpu = cpumask_first(vector_searchmask);
	if (cpu >= nr_cpu_ids)
		return -EINVAL;
	/* set_affinity might call here even though nothing needs to change */
	if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
		return 0;
	vector = irq_matrix_alloc_managed(vector_matrix, cpu);
	trace_vector_alloc_managed(irqd->irq, vector, vector);
	if (vector < 0)
		return vector;
	apic_update_vector(irqd, vector, cpu);
	apic_update_irq_cfg(irqd, vector, cpu);
	return 0;
}

static void clear_irq_vector(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	bool managed = irqd_affinity_is_managed(irqd);
	unsigned int vector = apicd->vector;

	lockdep_assert_held(&vector_lock);

	if (!vector)
		return;

	trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector,
			   apicd->prev_cpu);

	per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_UNUSED;
	irq_matrix_free(vector_matrix, apicd->cpu, vector, managed);
	apicd->vector = 0;

	/* Clean up move in progress */
	vector = apicd->prev_vector;
	if (!vector)
		return;

	per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_UNUSED;
	irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed);
	apicd->prev_vector = 0;
	apicd->move_in_progress = 0;
	hlist_del_init(&apicd->clist);
}

static void x86_vector_deactivate(struct irq_domain *dom, struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;

	trace_vector_deactivate(irqd->irq, apicd->is_managed,
				apicd->can_reserve, false);

	/* Regular fixed assigned interrupt */
	if (!apicd->is_managed && !apicd->can_reserve)
		return;
	/* If the interrupt has a global reservation, nothing to do */
	if (apicd->has_reserved)
		return;

	raw_spin_lock_irqsave(&vector_lock, flags);
	clear_irq_vector(irqd);
	if (apicd->can_reserve)
		reserve_irq_vector_locked(irqd);
	else
		vector_assign_managed_shutdown(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
}

static int activate_reserved(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int ret;

	ret = assign_irq_vector_any_locked(irqd);
	if (!ret) {
		apicd->has_reserved = false;
		/*
		 * Core might have disabled reservation mode after
		 * allocating the irq descriptor. Ideally this should
		 * happen before allocation time, but that would require
		 * completely convoluted ways of transporting that
		 * information.
		 */
		if (!irqd_can_reserve(irqd))
			apicd->can_reserve = false;
	}
	return ret;
}

static int activate_managed(struct irq_data *irqd)
{
	const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
	int ret;

	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {
		/* Something in the core code broke! Survive gracefully */
		pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);
		return -EINVAL;
	}

	ret = assign_managed_vector(irqd, vector_searchmask);
	/*
	 * This should not happen. The vector reservation got buggered. Handle
	 * it gracefully.
	 */
	if (WARN_ON_ONCE(ret < 0)) {
		pr_err("Managed startup irq %u, no vector available\n",
		       irqd->irq);
	}
	return ret;
}

static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
			       bool reserve)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;
	int ret = 0;

	trace_vector_activate(irqd->irq, apicd->is_managed,
			      apicd->can_reserve, reserve);

	/* Nothing to do for fixed assigned vectors */
	if (!apicd->can_reserve && !apicd->is_managed)
		return 0;

	raw_spin_lock_irqsave(&vector_lock, flags);
	if (reserve || irqd_is_managed_and_shutdown(irqd))
		vector_assign_managed_shutdown(irqd);
	else if (apicd->is_managed)
		ret = activate_managed(irqd);
	else if (apicd->has_reserved)
		ret = activate_reserved(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

static void vector_free_reserved_and_managed(struct irq_data *irqd)
{
	const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	trace_vector_teardown(irqd->irq, apicd->is_managed,
			      apicd->has_reserved);

	if (apicd->has_reserved)
		irq_matrix_remove_reserved(vector_matrix);
	if (apicd->is_managed)
		irq_matrix_remove_managed(vector_matrix, dest);
}

static void x86_vector_free_irqs(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irqd = irq_domain_get_irq_data(x86_vector_domain, virq + i);
		if (irqd && irqd->chip_data) {
			raw_spin_lock_irqsave(&vector_lock, flags);
			clear_irq_vector(irqd);
			vector_free_reserved_and_managed(irqd);
			apicd = irqd->chip_data;
			irq_domain_reset_irq_data(irqd);
			raw_spin_unlock_irqrestore(&vector_lock, flags);
			free_apic_chip_data(apicd);
		}
	}
}

static bool vector_configure_legacy(unsigned int virq, struct irq_data *irqd,
				    struct apic_chip_data *apicd)
{
	unsigned long flags;
	bool realloc = false;

	apicd->vector = ISA_IRQ_VECTOR(virq);
	apicd->cpu = 0;

	raw_spin_lock_irqsave(&vector_lock, flags);
	/*
	 * If the interrupt is activated, then it must stay at this vector
	 * position. That's usually the timer interrupt (0).
	 */
	if (irqd_is_activated(irqd)) {
		trace_vector_setup(virq, true, 0);
		apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu);
	} else {
		/* Release the vector */
		apicd->can_reserve = true;
		irqd_set_can_reserve(irqd);
		clear_irq_vector(irqd);
		realloc = true;
	}
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return realloc;
}

static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	int i, err, node;

	if (disable_apic)
		return -ENXIO;

	/* Currently vector allocator can't guarantee contiguous allocations */
	if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
		return -ENOSYS;

	for (i = 0; i < nr_irqs; i++) {
		irqd = irq_domain_get_irq_data(domain, virq + i);
		BUG_ON(!irqd);
		node = irq_data_get_node(irqd);
		WARN_ON_ONCE(irqd->chip_data);
		apicd = alloc_apic_chip_data(node);
		if (!apicd) {
			err = -ENOMEM;
			goto error;
		}

		apicd->irq = virq + i;
		irqd->chip = &lapic_controller;
		irqd->chip_data = apicd;
		irqd->hwirq = virq + i;
		irqd_set_single_target(irqd);
		/*
		 * Legacy vectors are already assigned when the IOAPIC
		 * takes them over. They stay on the same vector. This is
		 * required for check_timer() to work correctly as it might
		 * switch back to legacy mode. Only update the hardware
		 * config.
		 */
		if (info->flags & X86_IRQ_ALLOC_LEGACY) {
			if (!vector_configure_legacy(virq + i, irqd, apicd))
				continue;
		}

		err = assign_irq_vector_policy(irqd, info);
		trace_vector_setup(virq + i, false, err);
		if (err) {
			irqd->chip_data = NULL;
			free_apic_chip_data(apicd);
			goto error;
		}
	}

	return 0;

error:
	x86_vector_free_irqs(domain, virq, i);
	return err;
}

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
				  struct irq_data *irqd, int ind)
{
	struct apic_chip_data apicd;
	unsigned long flags;
	int irq;

	if (!irqd) {
		irq_matrix_debug_show(m, vector_matrix, ind);
		return;
	}

	irq = irqd->irq;
	if (irq < nr_legacy_irqs() && !test_bit(irq, &io_apic_irqs)) {
		seq_printf(m, "%*sVector: %5d\n", ind, "", ISA_IRQ_VECTOR(irq));
		seq_printf(m, "%*sTarget: Legacy PIC all CPUs\n", ind, "");
		return;
	}

	if (!irqd->chip_data) {
		seq_printf(m, "%*sVector: Not assigned\n", ind, "");
		return;
	}

	raw_spin_lock_irqsave(&vector_lock, flags);
	memcpy(&apicd, irqd->chip_data, sizeof(apicd));
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	seq_printf(m, "%*sVector: %5u\n", ind, "", apicd.vector);
	seq_printf(m, "%*sTarget: %5u\n", ind, "", apicd.cpu);
	if (apicd.prev_vector) {
		seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", apicd.prev_vector);
		seq_printf(m, "%*sPrevious target: %5u\n", ind, "", apicd.prev_cpu);
	}
	seq_printf(m, "%*smove_in_progress: %u\n", ind, "", apicd.move_in_progress ? 1 : 0);
	seq_printf(m, "%*sis_managed: %u\n", ind, "", apicd.is_managed ? 1 : 0);
	seq_printf(m, "%*scan_reserve: %u\n", ind, "", apicd.can_reserve ? 1 : 0);
	seq_printf(m, "%*shas_reserved: %u\n", ind, "", apicd.has_reserved ? 1 : 0);
	seq_printf(m, "%*scleanup_pending: %u\n", ind, "", !hlist_unhashed(&apicd.clist));
}
#endif

static const struct irq_domain_ops x86_vector_domain_ops = {
	.alloc		= x86_vector_alloc_irqs,
	.free		= x86_vector_free_irqs,
	.activate	= x86_vector_activate,
	.deactivate	= x86_vector_deactivate,
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
	.debug_show	= x86_vector_debug_show,
#endif
};

int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
		nr_irqs = NR_VECTORS * nr_cpu_ids;

	nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI)
	/*
	 * for MSI and HT dyn irq
	 */
	if (gsi_top <= NR_IRQS_LEGACY)
		nr += 8 * nr_cpu_ids;
	else
		nr += gsi_top * 16;
#endif
	if (nr < nr_irqs)
		nr_irqs = nr;

	/*
	 * We don't know if PIC is present at this point so we need to do
	 * probe() to get the right number of legacy IRQs.
	 */
	return legacy_pic->probe();
}

void lapic_assign_legacy_vector(unsigned int irq, bool replace)
{
	/*
	 * Use assign system here so it won't get accounted as allocated
	 * and movable in the cpu hotplug check and it prevents managed
	 * irq reservation from touching it.
	 */
	irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
}

void __init lapic_assign_system_vectors(void)
{
	unsigned int i, vector = 0;

	for_each_set_bit_from(vector, system_vectors, NR_VECTORS)
		irq_matrix_assign_system(vector_matrix, vector, false);

	if (nr_legacy_irqs() > 1)
		lapic_assign_legacy_vector(PIC_CASCADE_IR, false);

	/* System vectors are reserved; bring the matrix online */
	irq_matrix_online(vector_matrix);

	/* Mark the preallocated legacy interrupts */
	for (i = 0; i < nr_legacy_irqs(); i++) {
		if (i != PIC_CASCADE_IR)
			irq_matrix_assign(vector_matrix, ISA_IRQ_VECTOR(i));
	}
}

int __init arch_early_irq_init(void)
{
	struct fwnode_handle *fn;

	fn = irq_domain_alloc_named_fwnode("VECTOR");
	BUG_ON(!fn);
	x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops,
						   NULL);
	BUG_ON(x86_vector_domain == NULL);
	irq_domain_free_fwnode(fn);
	irq_set_default_host(x86_vector_domain);

	arch_init_msi_domain(x86_vector_domain);

	BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));

	/*
	 * Allocate the vector matrix allocator data structure and limit the
	 * search area.
	 */
	vector_matrix = irq_alloc_matrix(NR_VECTORS, FIRST_EXTERNAL_VECTOR,
					 FIRST_SYSTEM_VECTOR);
	BUG_ON(!vector_matrix);

	return arch_early_ioapic_init();
}

#ifdef CONFIG_SMP

static struct irq_desc *__setup_vector_irq(int vector)
{
	int isairq = vector - ISA_IRQ_VECTOR(0);

	/* Check whether the irq is in the legacy space */
	if (isairq < 0 || isairq >= nr_legacy_irqs())
		return VECTOR_UNUSED;
	/* Check whether the irq is handled by the IOAPIC */
	if (test_bit(isairq, &io_apic_irqs))
		return VECTOR_UNUSED;
	return irq_to_desc(isairq);
}

/* Online the local APIC infrastructure and initialize the vectors */
void lapic_online(void)
{
	unsigned int vector;

	lockdep_assert_held(&vector_lock);

	/* Online the vector matrix array for this CPU */
	irq_matrix_online(vector_matrix);

	/*
	 * The interrupt affinity logic never targets interrupts to offline
	 * CPUs. The exception are the legacy PIC interrupts. In general
	 * they are only targeted to CPU0, but depending on the platform
	 * they can be distributed to any online CPU in hardware. The
	 * kernel has no influence on that. So all active legacy vectors
	 * must be installed on all CPUs. All non legacy interrupts can be
	 * cleared.
	 */
	for (vector = 0; vector < NR_VECTORS; vector++)
		this_cpu_write(vector_irq[vector], __setup_vector_irq(vector));
}

void lapic_offline(void)
{
	lock_vector_lock();
	irq_matrix_offline(vector_matrix);
	unlock_vector_lock();
}

static int apic_set_affinity(struct irq_data *irqd,
			     const struct cpumask *dest, bool force)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int err;

	/*
	 * Core code can call here for inactive interrupts. For inactive
	 * interrupts which use managed or reservation mode there is no
	 * point in going through the vector assignment right now as the
	 * activation will assign a vector which fits the destination
	 * cpumask. Let the core code store the destination mask and be
	 * done with it.
	 */
	if (!irqd_is_activated(irqd) &&
	    (apicd->is_managed || apicd->can_reserve))
		return IRQ_SET_MASK_OK;

	raw_spin_lock(&vector_lock);
	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	if (irqd_affinity_is_managed(irqd))
		err = assign_managed_vector(irqd, vector_searchmask);
	else
		err = assign_vector_locked(irqd, vector_searchmask);
	raw_spin_unlock(&vector_lock);
	return err ? err : IRQ_SET_MASK_OK;
}

#else
# define apic_set_affinity	NULL
#endif

static int apic_retrigger_irq(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	apic->send_IPI(apicd->cpu, apicd->vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}

void apic_ack_irq(struct irq_data *irqd)
{
	irq_move_irq(irqd);
	ack_APIC_irq();
}

void apic_ack_edge(struct irq_data *irqd)
{
	irq_complete_move(irqd_cfg(irqd));
	apic_ack_irq(irqd);
}

static struct irq_chip lapic_controller = {
	.name			= "APIC",
	.irq_ack		= apic_ack_edge,
	.irq_set_affinity	= apic_set_affinity,
	.irq_retrigger		= apic_retrigger_irq,
};

#ifdef CONFIG_SMP

static void free_moved_vector(struct apic_chip_data *apicd)
{
	unsigned int vector = apicd->prev_vector;
	unsigned int cpu = apicd->prev_cpu;
	bool managed = apicd->is_managed;

	/*
	 * This should never happen. Managed interrupts are not
	 * migrated except on CPU down, which does not involve the
	 * cleanup vector. But try to keep the accounting correct
	 * nevertheless.
	 */
	WARN_ON_ONCE(managed);

	trace_vector_free_moved(apicd->irq, cpu, vector, managed);
	irq_matrix_free(vector_matrix, cpu, vector, managed);
	per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
	hlist_del_init(&apicd->clist);
	apicd->prev_vector = 0;
	apicd->move_in_progress = 0;
}

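/*
 * Runs on the CPU which owned the old vector: walk this CPU's cleanup
 * list and release every previous vector whose interrupt has already
 * arrived on its new destination.
 */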
asmlinkage __visible void __irq_entry smp_irq_move_cleanup_interrupt(void)
{
	struct hlist_head *clhead = this_cpu_ptr(&cleanup_list);
	struct apic_chip_data *apicd;
	struct hlist_node *tmp;

	entering_ack_irq();
	/* Prevent vectors vanishing under us */
	raw_spin_lock(&vector_lock);

	hlist_for_each_entry_safe(apicd, tmp, clhead, clist) {
		unsigned int irr, vector = apicd->prev_vector;

		/*
		 * Paranoia: Check if the vector that needs to be cleaned
		 * up is registered in the APIC's IRR. If so, then this is
		 * not the best time to clean it up. Clean it up in the
		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to this CPU. IRQ_MOVE_CLEANUP_VECTOR is the lowest
		 * priority external vector, so on return from this
		 * interrupt the device interrupt will happen first.
		 */
		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1U << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			continue;
		}
		free_moved_vector(apicd);
	}

	raw_spin_unlock(&vector_lock);
	exiting_irq();
}

static void __send_cleanup_vector(struct apic_chip_data *apicd)
{
	unsigned int cpu;

	raw_spin_lock(&vector_lock);
	apicd->move_in_progress = 0;
	cpu = apicd->prev_cpu;
	if (cpu_online(cpu)) {
		hlist_add_head(&apicd->clist, per_cpu_ptr(&cleanup_list, cpu));
		apic->send_IPI(cpu, IRQ_MOVE_CLEANUP_VECTOR);
	} else {
		apicd->prev_vector = 0;
	}
	raw_spin_unlock(&vector_lock);
}

void send_cleanup_vector(struct irq_cfg *cfg)
{
	struct apic_chip_data *apicd;

	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
	if (apicd->move_in_progress)
		__send_cleanup_vector(apicd);
}

static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
	struct apic_chip_data *apicd;

	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
	if (likely(!apicd->move_in_progress))
		return;

	if (vector == apicd->vector && apicd->cpu == smp_processor_id())
		__send_cleanup_vector(apicd);
}

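/*
 * On the x86 interrupt entry path ~regs->orig_ax holds the vector number
 * this interrupt arrived on, so the pending move is only completed once
 * the first interrupt has hit the new vector on the new target CPU.
 */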
void irq_complete_move(struct irq_cfg *cfg)
{
	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}

/*
 * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
 */
void irq_force_complete_move(struct irq_desc *desc)
{
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	unsigned int vector;

	/*
	 * The function is called for all descriptors regardless of which
	 * irqdomain they belong to. For example if an IRQ is provided by
	 * an irq_chip as part of a GPIO driver, the chip data for that
	 * descriptor is specific to the irq_chip in question.
	 *
	 * Check first that the chip_data is what we expect
	 * (apic_chip_data) before touching it any further.
	 */
	irqd = irq_domain_get_irq_data(x86_vector_domain,
				       irq_desc_get_irq(desc));
	if (!irqd)
		return;

	raw_spin_lock(&vector_lock);
	apicd = apic_chip_data(irqd);
	if (!apicd)
		goto unlock;

	/*
	 * If prev_vector is empty, no action required.
	 */
	vector = apicd->prev_vector;
	if (!vector)
		goto unlock;

	/*
	 * This is tricky. If the cleanup of the old vector has not been
	 * done yet, then the following setaffinity call will fail with
	 * -EBUSY. This can leave the interrupt in a stale state.
	 *
	 * All CPUs are stuck in stop machine with interrupts disabled so
	 * calling __irq_complete_move() would be completely pointless.
	 *
	 * 1) The interrupt is in move_in_progress state. That means that we
	 *    have not seen an interrupt since the io_apic was reprogrammed to
	 *    the new vector.
	 *
	 * 2) The interrupt has fired on the new vector, but the cleanup IPIs
	 *    have not been processed yet.
	 */
	if (apicd->move_in_progress) {
		/*
		 * In theory there is a race:
		 *
		 * set_ioapic(new_vector) <-- Interrupt is raised before update
		 *			      is effective, i.e. it's raised on
		 *			      the old vector.
		 *
		 * So if the target cpu cannot handle that interrupt before
		 * the old vector is cleaned up, we get a spurious interrupt
		 * and in the worst case the ioapic irq line becomes stale.
		 *
		 * But in case of cpu hotplug this should be a non-issue
		 * because if the affinity update happens right before all
		 * cpus rendezvous in stop machine, there is no way that the
		 * interrupt can be blocked on the target cpu because all cpus
		 * loop first with interrupts enabled in stop machine, so the
		 * old vector is not yet cleaned up when the interrupt fires.
		 *
		 * So the only way to run into this issue is if the delivery
		 * of the interrupt on the apic/system bus would be delayed
		 * beyond the point where the target cpu disables interrupts
		 * in stop machine. I doubt that it can happen, but at least
		 * there is a theoretical chance. Virtualization might be
		 * able to expose this, but AFAICT the IOAPIC emulation is not
		 * as stupid as the real hardware.
		 *
		 * Anyway, there is nothing we can do about that at this point
		 * w/o refactoring the whole fixup_irq() business completely.
		 * We print at least the irq number and the old vector number,
		 * so we have the necessary information when a problem in that
		 * area arises.
		 */
		pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
			irqd->irq, vector);
	}
	free_moved_vector(apicd);
unlock:
	raw_spin_unlock(&vector_lock);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Note, this is not accurate accounting, but at least good enough to
 * prevent that the actual interrupt move will run out of vectors.
 */
int lapic_can_unplug_cpu(void)
{
	unsigned int rsvd, avl, tomove, cpu = smp_processor_id();
	int ret = 0;

	raw_spin_lock(&vector_lock);
	tomove = irq_matrix_allocated(vector_matrix);
	avl = irq_matrix_available(vector_matrix, true);
	if (avl < tomove) {
		pr_warn("CPU %u has %u vectors, %u available. Cannot disable CPU\n",
			cpu, tomove, avl);
		ret = -ENOSPC;
		goto out;
	}
	rsvd = irq_matrix_reserved(vector_matrix);
	if (avl < rsvd) {
		pr_warn("Reserved vectors %u > available %u. IRQ request may fail\n",
			rsvd, avl);
	}
out:
	raw_spin_unlock(&vector_lock);
	return ret;
}
#endif /* HOTPLUG_CPU */
#endif /* SMP */

static void __init print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		pr_cont("%08x", apic_read(base + i*0x10));

	pr_cont("\n");
}

static void __init print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	pr_debug("printing local APIC contents on CPU#%d/%d:\n",
		 smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	pr_info("... APIC ID: %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	pr_info("... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			pr_debug("... APIC ARBPRI: %08x (%02x)\n",
				 v, v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		pr_debug("... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		pr_debug("... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	pr_debug("... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		pr_debug("... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	pr_debug("... APIC SPIV: %08x\n", v);

	pr_debug("... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	pr_debug("... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	pr_debug("... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		pr_debug("... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	pr_debug("... APIC ICR: %08x\n", (u32)icr);
	pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	pr_debug("... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		pr_debug("... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	pr_debug("... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	pr_debug("... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		pr_debug("... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	pr_debug("... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	pr_debug("... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	pr_debug("... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		pr_debug("... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		pr_debug("... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			pr_debug("... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}

static void __init print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}

static void __init print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!nr_legacy_irqs())
		return;

	pr_debug("\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	pr_debug("... PIC IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	pr_debug("... PIC IRR: %04x\n", v);

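	/*
	 * OCW3: 0x0b makes the next read from each PIC's command port
	 * return the ISR; 0x0a below switches the read-back to the IRR.
	 */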
	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	pr_debug("... PIC ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	pr_debug("... PIC ELCR: %04x\n", v);
}

static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);

static int __init print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}

late_initcall(print_ICs);