iosapic.c

/*
 * I/O SAPIC support.
 *
 * Copyright (C) 1999 Intel Corp.
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 2000-2002 J.I. Lee <jung-ik.lee@intel.com>
 * Copyright (C) 1999-2000, 2002-2003 Hewlett-Packard Co.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com>
 *
 * 00/04/19	D. Mosberger	Rewritten to mirror more closely the x86 I/O
 *				APIC code.  In particular, we now have separate
 *				handlers for edge and level triggered
 *				interrupts.
 * 00/10/27	Asit Mallick, Goutham Rao <goutham.rao@intel.com> IRQ vector
 *				allocation PCI to vector mapping, shared PCI
 *				interrupts.
 * 00/10/27	D. Mosberger	Document things a bit more to make them more
 *				understandable.  Clean up much of the old
 *				IOSAPIC cruft.
 * 01/07/27	J.I. Lee	PCI irq routing, Platform/Legacy interrupts
 *				and fixes for ACPI S5 (SoftOff) support.
 * 02/01/23	J.I. Lee	iosapic pgm fixes for PCI irq routing from _PRT
 * 02/01/07	E. Focht	<efocht@ess.nec.de> Redirectable interrupt
 *				vectors in iosapic_set_affinity(),
 *				initializations for /proc/irq/#/smp_affinity
 * 02/04/02	P. Diefenbaugh	Cleaned up ACPI PCI IRQ routing.
 * 02/04/18	J.I. Lee	bug fix in iosapic_init_pci_irq
 * 02/04/30	J.I. Lee	bug fix in find_iosapic to fix ACPI PCI IRQ to
 *				IOSAPIC mapping error
 * 02/07/29	T. Kochi	Allocate interrupt vectors dynamically
 * 02/08/04	T. Kochi	Cleaned up terminology (irq, global system
 *				interrupt, vector, etc.)
 * 02/09/20	D. Mosberger	Simplified by taking advantage of ACPI's
 *				pci_irq code.
 * 03/02/19	B. Helgaas	Make pcat_compat system-wide, not per-IOSAPIC.
 *				Remove iosapic_address & gsi_base from
 *				external interfaces.  Rationalize
 *				__init/__devinit attributes.
 * 04/12/04	Ashok Raj	<ashok.raj@intel.com> Intel Corporation 2004
 *				Updated to work with irq migration necessary
 *				for CPU Hotplug
 */
/*
 * Here is what the interrupt logic between a PCI device and the kernel looks
 * like:
 *
 * (1) A PCI device raises one of the four interrupt pins (INTA, INTB, INTC,
 *     INTD).  The device is uniquely identified by its bus- and slot-number
 *     (the function number does not matter here because all functions share
 *     the same interrupt lines).
 *
 * (2) The motherboard routes the interrupt line to a pin on an IOSAPIC
 *     controller.  Multiple interrupt lines may have to share the same
 *     IOSAPIC pin (if they're level triggered and use the same polarity).
 *     Each interrupt line has a unique Global System Interrupt (GSI) number
 *     which can be calculated as the sum of the controller's base GSI number
 *     and the IOSAPIC pin number to which the line connects.
 *
 * (3) The IOSAPIC uses internal routing table entries (RTEs) to map the
 *     IOSAPIC pin to an IA-64 interrupt vector.  This interrupt vector is
 *     then sent to the CPU.
 *
 * (4) The kernel recognizes an interrupt as an IRQ.  The IRQ interface is
 *     used as the architecture-independent interrupt handling mechanism in
 *     Linux.  Since an IRQ is a number, we need an IA-64 interrupt vector
 *     number <-> IRQ number mapping.  On smaller systems, we use a
 *     one-to-one mapping between IA-64 vector and IRQ.  A platform can
 *     implement the platform_irq_to_vector(irq) and
 *     platform_local_vector_to_irq(vector) APIs to differentiate the
 *     mapping.  Please see also arch/ia64/include/asm/hw_irq.h for those
 *     APIs.
 *
 * To sum up, there are three levels of mappings involved:
 *
 *	PCI pin -> global system interrupt (GSI) -> IA-64 vector <-> IRQ
 *
 * (A small illustrative sketch of the GSI arithmetic follows this comment.)
 *
 * Note: The term "IRQ" is loosely used everywhere in the Linux kernel to
 * describe interrupts.  Now we use "IRQ" only for Linux IRQs.  ISA IRQ
 * (isa_irq) is the only exception in this source code.
 */
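
/*
 * Illustrative sketch, not part of the original driver: a hypothetical
 * helper showing the GSI arithmetic described in step (2) above.  For
 * example, a line wired to pin 3 of an IOSAPIC whose base GSI is 16 maps
 * to GSI 16 + 3 = 19.  The function name is made up and the block is kept
 * under "#if 0" so it is never compiled.
 */
#if 0
static unsigned int example_pin_to_gsi(unsigned int gsi_base, unsigned int pin)
{
	/* GSI = controller's base GSI number + IOSAPIC pin number */
	return gsi_base + pin;
}
#endif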
#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/bootmem.h>

#include <asm/delay.h>
#include <asm/hw_irq.h>
#include <asm/io.h>
#include <asm/iosapic.h>
#include <asm/machvec.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/system.h>

#undef DEBUG_INTERRUPT_ROUTING

#ifdef DEBUG_INTERRUPT_ROUTING
#define DBG(fmt...)	printk(fmt)
#else
#define DBG(fmt...)
#endif
static DEFINE_SPINLOCK(iosapic_lock);

/*
 * These tables map IA-64 vectors to the IOSAPIC pin that generates this
 * vector.
 */

#define NO_REF_RTE	0

static struct iosapic {
	char __iomem	*addr;		/* base address of IOSAPIC */
	unsigned int	gsi_base;	/* GSI base */
	unsigned short	num_rte;	/* # of RTEs on this IOSAPIC */
	int		rtes_inuse;	/* # of RTEs in use on this IOSAPIC */
#ifdef CONFIG_NUMA
	unsigned short	node;		/* numa node association via pxm */
#endif
	spinlock_t	lock;		/* lock for indirect reg access */
} iosapic_lists[NR_IOSAPICS];

struct iosapic_rte_info {
	struct list_head rte_list;	/* RTEs sharing the same vector */
	char		rte_index;	/* IOSAPIC RTE index */
	int		refcnt;		/* reference counter */
	struct iosapic	*iosapic;
} ____cacheline_aligned;

static struct iosapic_intr_info {
	struct list_head rtes;		/* RTEs using this vector (empty =>
					 * not an IOSAPIC interrupt) */
	int		count;		/* # of registered RTEs */
	u32		low32;		/* current value of low word of
					 * Redirection table entry */
	unsigned int	dest;		/* destination CPU physical ID */
	unsigned char	dmode	 : 3;	/* delivery mode (see iosapic.h) */
	unsigned char	polarity : 1;	/* interrupt polarity
					 * (see iosapic.h) */
	unsigned char	trigger	 : 1;	/* trigger mode (see iosapic.h) */
} iosapic_intr_info[NR_IRQS];

static unsigned char pcat_compat __devinitdata;	/* 8259 compatibility flag */
static inline void
iosapic_write(struct iosapic *iosapic, unsigned int reg, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&iosapic->lock, flags);
	__iosapic_write(iosapic->addr, reg, val);
	spin_unlock_irqrestore(&iosapic->lock, flags);
}

/*
 * Find an IOSAPIC associated with a GSI
 */
static inline int
find_iosapic (unsigned int gsi)
{
	int i;

	for (i = 0; i < NR_IOSAPICS; i++) {
		if ((unsigned) (gsi - iosapic_lists[i].gsi_base) <
		    iosapic_lists[i].num_rte)
			return i;
	}

	return -1;
}

static inline int __gsi_to_irq(unsigned int gsi)
{
	int irq;
	struct iosapic_intr_info *info;
	struct iosapic_rte_info *rte;

	for (irq = 0; irq < NR_IRQS; irq++) {
		info = &iosapic_intr_info[irq];
		list_for_each_entry(rte, &info->rtes, rte_list)
			if (rte->iosapic->gsi_base + rte->rte_index == gsi)
				return irq;
	}
	return -1;
}

int
gsi_to_irq (unsigned int gsi)
{
	unsigned long flags;
	int irq;

	spin_lock_irqsave(&iosapic_lock, flags);
	irq = __gsi_to_irq(gsi);
	spin_unlock_irqrestore(&iosapic_lock, flags);
	return irq;
}

static struct iosapic_rte_info *find_rte(unsigned int irq, unsigned int gsi)
{
	struct iosapic_rte_info *rte;

	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
		if (rte->iosapic->gsi_base + rte->rte_index == gsi)
			return rte;
	return NULL;
}
static void
set_rte (unsigned int gsi, unsigned int irq, unsigned int dest, int mask)
{
	unsigned long pol, trigger, dmode;
	u32 low32, high32;
	int rte_index;
	char redir;
	struct iosapic_rte_info *rte;
	ia64_vector vector = irq_to_vector(irq);

	DBG(KERN_DEBUG"IOSAPIC: routing vector %d to 0x%x\n", vector, dest);

	rte = find_rte(irq, gsi);
	if (!rte)
		return;		/* not an IOSAPIC interrupt */

	rte_index = rte->rte_index;
	pol     = iosapic_intr_info[irq].polarity;
	trigger = iosapic_intr_info[irq].trigger;
	dmode   = iosapic_intr_info[irq].dmode;

	redir = (dmode == IOSAPIC_LOWEST_PRIORITY) ? 1 : 0;

#ifdef CONFIG_SMP
	set_irq_affinity_info(irq, (int)(dest & 0xffff), redir);
#endif

	low32 = ((pol << IOSAPIC_POLARITY_SHIFT) |
		 (trigger << IOSAPIC_TRIGGER_SHIFT) |
		 (dmode << IOSAPIC_DELIVERY_SHIFT) |
		 ((mask ? 1 : 0) << IOSAPIC_MASK_SHIFT) |
		 vector);

	/* dest contains both id and eid */
	high32 = (dest << IOSAPIC_DEST_SHIFT);

	iosapic_write(rte->iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
	iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
	iosapic_intr_info[irq].low32 = low32;
	iosapic_intr_info[irq].dest = dest;
}

static void
nop (struct irq_data *data)
{
	/* do nothing... */
}

#ifdef CONFIG_KEXEC
void
kexec_disable_iosapic(void)
{
	struct iosapic_intr_info *info;
	struct iosapic_rte_info *rte;
	ia64_vector vec;
	int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		info = &iosapic_intr_info[irq];
		vec = irq_to_vector(irq);
		list_for_each_entry(rte, &info->rtes, rte_list) {
			iosapic_write(rte->iosapic,
				      IOSAPIC_RTE_LOW(rte->rte_index),
				      IOSAPIC_MASK|vec);
			iosapic_eoi(rte->iosapic->addr, vec);
		}
	}
}
#endif
static void
mask_irq (struct irq_data *data)
{
	unsigned int irq = data->irq;
	u32 low32;
	int rte_index;
	struct iosapic_rte_info *rte;

	if (!iosapic_intr_info[irq].count)
		return;			/* not an IOSAPIC interrupt! */

	/* set only the mask bit */
	low32 = iosapic_intr_info[irq].low32 |= IOSAPIC_MASK;
	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
		rte_index = rte->rte_index;
		iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
	}
}

static void
unmask_irq (struct irq_data *data)
{
	unsigned int irq = data->irq;
	u32 low32;
	int rte_index;
	struct iosapic_rte_info *rte;

	if (!iosapic_intr_info[irq].count)
		return;			/* not an IOSAPIC interrupt! */

	low32 = iosapic_intr_info[irq].low32 &= ~IOSAPIC_MASK;
	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
		rte_index = rte->rte_index;
		iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
	}
}

static int
iosapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
		     bool force)
{
#ifdef CONFIG_SMP
	unsigned int irq = data->irq;
	u32 high32, low32;
	int cpu, dest, rte_index;
	int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
	struct iosapic_rte_info *rte;
	struct iosapic *iosapic;

	irq &= (~IA64_IRQ_REDIRECTED);

	cpu = cpumask_first_and(cpu_online_mask, mask);
	if (cpu >= nr_cpu_ids)
		return -1;

	if (irq_prepare_move(irq, cpu))
		return -1;

	dest = cpu_physical_id(cpu);

	if (!iosapic_intr_info[irq].count)
		return -1;			/* not an IOSAPIC interrupt */

	set_irq_affinity_info(irq, dest, redir);

	/* dest contains both id and eid */
	high32 = dest << IOSAPIC_DEST_SHIFT;

	low32 = iosapic_intr_info[irq].low32 & ~(7 << IOSAPIC_DELIVERY_SHIFT);
	if (redir)
		/* change delivery mode to lowest priority */
		low32 |= (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
	else
		/* change delivery mode to fixed */
		low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT);
	low32 &= IOSAPIC_VECTOR_MASK;
	low32 |= irq_to_vector(irq);

	iosapic_intr_info[irq].low32 = low32;
	iosapic_intr_info[irq].dest = dest;
	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
		iosapic = rte->iosapic;
		rte_index = rte->rte_index;
		iosapic_write(iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
		iosapic_write(iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
	}
#endif
	return 0;
}
/*
 * Handlers for level-triggered interrupts.
 */

static unsigned int
iosapic_startup_level_irq (struct irq_data *data)
{
	unmask_irq(data);
	return 0;
}

static void
iosapic_unmask_level_irq (struct irq_data *data)
{
	unsigned int irq = data->irq;
	ia64_vector vec = irq_to_vector(irq);
	struct iosapic_rte_info *rte;
	int do_unmask_irq = 0;

	irq_complete_move(irq);
	if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
		do_unmask_irq = 1;
		mask_irq(data);
	} else
		unmask_irq(data);

	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
		iosapic_eoi(rte->iosapic->addr, vec);

	if (unlikely(do_unmask_irq)) {
		move_masked_irq(irq);
		unmask_irq(data);
	}
}

#define iosapic_shutdown_level_irq	mask_irq
#define iosapic_enable_level_irq	unmask_irq
#define iosapic_disable_level_irq	mask_irq
#define iosapic_ack_level_irq		nop

static struct irq_chip irq_type_iosapic_level = {
	.name =			"IO-SAPIC-level",
	.irq_startup =		iosapic_startup_level_irq,
	.irq_shutdown =		iosapic_shutdown_level_irq,
	.irq_enable =		iosapic_enable_level_irq,
	.irq_disable =		iosapic_disable_level_irq,
	.irq_ack =		iosapic_ack_level_irq,
	.irq_mask =		mask_irq,
	.irq_unmask =		iosapic_unmask_level_irq,
	.irq_set_affinity =	iosapic_set_affinity
};

/*
 * Handlers for edge-triggered interrupts.
 */

static unsigned int
iosapic_startup_edge_irq (struct irq_data *data)
{
	unmask_irq(data);
	/*
	 * IOSAPIC simply drops interrupts pended while the
	 * corresponding pin was masked, so we can't know if an
	 * interrupt is pending already.  Let's hope not...
	 */
	return 0;
}

static void
iosapic_ack_edge_irq (struct irq_data *data)
{
	unsigned int irq = data->irq;
	struct irq_desc *idesc = irq_desc + irq;

	irq_complete_move(irq);
	move_native_irq(irq);
	/*
	 * Once we have recorded IRQ_PENDING already, we can mask the
	 * interrupt for real.  This prevents IRQ storms from unhandled
	 * devices.
	 */
	if ((idesc->status & (IRQ_PENDING|IRQ_DISABLED)) ==
	    (IRQ_PENDING|IRQ_DISABLED))
		mask_irq(data);
}

#define iosapic_enable_edge_irq		unmask_irq
#define iosapic_disable_edge_irq	nop

static struct irq_chip irq_type_iosapic_edge = {
	.name =			"IO-SAPIC-edge",
	.irq_startup =		iosapic_startup_edge_irq,
	.irq_shutdown =		iosapic_disable_edge_irq,
	.irq_enable =		iosapic_enable_edge_irq,
	.irq_disable =		iosapic_disable_edge_irq,
	.irq_ack =		iosapic_ack_edge_irq,
	.irq_mask =		mask_irq,
	.irq_unmask =		unmask_irq,
	.irq_set_affinity =	iosapic_set_affinity
};
static unsigned int
iosapic_version (char __iomem *addr)
{
	/*
	 * The IOSAPIC Version Register returns a 32-bit structure like:
	 * {
	 *	unsigned int version   : 8;
	 *	unsigned int reserved1 : 8;
	 *	unsigned int max_redir : 8;
	 *	unsigned int reserved2 : 8;
	 * }
	 */
	return __iosapic_read(addr, IOSAPIC_VERSION);
}
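
/*
 * Illustrative sketch, not part of the original driver: hypothetical helpers
 * showing how the version-register fields documented above can be unpacked.
 * The driver itself just shifts and masks inline, e.g. "(ver >> 16) & 0xff"
 * in iosapic_check_gsi_range() and iosapic_init().  Kept under "#if 0" so it
 * is never compiled.
 */
#if 0
static inline unsigned int example_iosapic_version(unsigned int ver)
{
	return ver & 0xff;		/* bits 0..7: version */
}

static inline unsigned int example_iosapic_max_redir(unsigned int ver)
{
	return (ver >> 16) & 0xff;	/* bits 16..23: highest RTE index */
}
#endif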
static int iosapic_find_sharable_irq(unsigned long trigger, unsigned long pol)
{
	int i, irq = -ENOSPC, min_count = -1;
	struct iosapic_intr_info *info;

	/*
	 * shared vectors for edge-triggered interrupts are not
	 * supported yet
	 */
	if (trigger == IOSAPIC_EDGE)
		return -EINVAL;

	for (i = 0; i < NR_IRQS; i++) {
		info = &iosapic_intr_info[i];
		if (info->trigger == trigger && info->polarity == pol &&
		    (info->dmode == IOSAPIC_FIXED ||
		     info->dmode == IOSAPIC_LOWEST_PRIORITY) &&
		    can_request_irq(i, IRQF_SHARED)) {
			if (min_count == -1 || info->count < min_count) {
				irq = i;
				min_count = info->count;
			}
		}
	}
	return irq;
}
/*
 * If the given vector is already owned by another interrupt, assign a new
 * vector for that interrupt and make the original vector available.
 */
static void __init
iosapic_reassign_vector (int irq)
{
	int new_irq;

	if (iosapic_intr_info[irq].count) {
		new_irq = create_irq();
		if (new_irq < 0)
			panic("%s: out of interrupt vectors!\n", __func__);
		printk(KERN_INFO "Reassigning vector %d to %d\n",
		       irq_to_vector(irq), irq_to_vector(new_irq));
		memcpy(&iosapic_intr_info[new_irq], &iosapic_intr_info[irq],
		       sizeof(struct iosapic_intr_info));
		INIT_LIST_HEAD(&iosapic_intr_info[new_irq].rtes);
		list_move(iosapic_intr_info[irq].rtes.next,
			  &iosapic_intr_info[new_irq].rtes);
		memset(&iosapic_intr_info[irq], 0,
		       sizeof(struct iosapic_intr_info));
		iosapic_intr_info[irq].low32 = IOSAPIC_MASK;
		INIT_LIST_HEAD(&iosapic_intr_info[irq].rtes);
	}
}
static inline int irq_is_shared (int irq)
{
	return (iosapic_intr_info[irq].count > 1);
}

struct irq_chip*
ia64_native_iosapic_get_irq_chip(unsigned long trigger)
{
	if (trigger == IOSAPIC_EDGE)
		return &irq_type_iosapic_edge;
	else
		return &irq_type_iosapic_level;
}

static int
register_intr (unsigned int gsi, int irq, unsigned char delivery,
	       unsigned long polarity, unsigned long trigger)
{
	struct irq_desc *idesc;
	struct irq_chip *irq_type;
	int index;
	struct iosapic_rte_info *rte;

	index = find_iosapic(gsi);
	if (index < 0) {
		printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
		       __func__, gsi);
		return -ENODEV;
	}

	rte = find_rte(irq, gsi);
	if (!rte) {
		rte = kzalloc(sizeof (*rte), GFP_ATOMIC);
		if (!rte) {
			printk(KERN_WARNING "%s: cannot allocate memory\n",
			       __func__);
			return -ENOMEM;
		}

		rte->iosapic	= &iosapic_lists[index];
		rte->rte_index	= gsi - rte->iosapic->gsi_base;
		rte->refcnt++;
		list_add_tail(&rte->rte_list, &iosapic_intr_info[irq].rtes);
		iosapic_intr_info[irq].count++;
		iosapic_lists[index].rtes_inuse++;
	}
	else if (rte->refcnt == NO_REF_RTE) {
		struct iosapic_intr_info *info = &iosapic_intr_info[irq];
		if (info->count > 0 &&
		    (info->trigger != trigger || info->polarity != polarity)){
			printk (KERN_WARNING
				"%s: cannot override the interrupt\n",
				__func__);
			return -EINVAL;
		}
		rte->refcnt++;
		iosapic_intr_info[irq].count++;
		iosapic_lists[index].rtes_inuse++;
	}

	iosapic_intr_info[irq].polarity = polarity;
	iosapic_intr_info[irq].dmode    = delivery;
	iosapic_intr_info[irq].trigger  = trigger;

	irq_type = iosapic_get_irq_chip(trigger);

	idesc = irq_desc + irq;
	if (irq_type != NULL && idesc->chip != irq_type) {
		if (idesc->chip != &no_irq_chip)
			printk(KERN_WARNING
			       "%s: changing vector %d from %s to %s\n",
			       __func__, irq_to_vector(irq),
			       idesc->chip->name, irq_type->name);
		idesc->chip = irq_type;
	}
	if (trigger == IOSAPIC_EDGE)
		__set_irq_handler_unlocked(irq, handle_edge_irq);
	else
		__set_irq_handler_unlocked(irq, handle_level_irq);
	return 0;
}
static unsigned int
get_target_cpu (unsigned int gsi, int irq)
{
#ifdef CONFIG_SMP
	static int cpu = -1;
	extern int cpe_vector;
	cpumask_t domain = irq_to_domain(irq);

	/*
	 * In case of vector shared by multiple RTEs, all RTEs that
	 * share the vector need to use the same destination CPU.
	 */
	if (iosapic_intr_info[irq].count)
		return iosapic_intr_info[irq].dest;

	/*
	 * If the platform supports redirection via XTP, let it
	 * distribute interrupts.
	 */
	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
		return cpu_physical_id(smp_processor_id());

	/*
	 * Some interrupts (ACPI SCI, for instance) are registered
	 * before the BSP is marked as online.
	 */
	if (!cpu_online(smp_processor_id()))
		return cpu_physical_id(smp_processor_id());

#ifdef CONFIG_ACPI
	if (cpe_vector > 0 && irq_to_vector(irq) == IA64_CPEP_VECTOR)
		return get_cpei_target_cpu();
#endif

#ifdef CONFIG_NUMA
	{
		int num_cpus, cpu_index, iosapic_index, numa_cpu, i = 0;
		const struct cpumask *cpu_mask;

		iosapic_index = find_iosapic(gsi);
		if (iosapic_index < 0 ||
		    iosapic_lists[iosapic_index].node == MAX_NUMNODES)
			goto skip_numa_setup;

		cpu_mask = cpumask_of_node(iosapic_lists[iosapic_index].node);
		num_cpus = 0;
		for_each_cpu_and(numa_cpu, cpu_mask, &domain) {
			if (cpu_online(numa_cpu))
				num_cpus++;
		}

		if (!num_cpus)
			goto skip_numa_setup;

		/* Use irq assignment to distribute across cpus in node */
		cpu_index = irq % num_cpus;

		for_each_cpu_and(numa_cpu, cpu_mask, &domain)
			if (cpu_online(numa_cpu) && i++ >= cpu_index)
				break;

		if (numa_cpu < nr_cpu_ids)
			return cpu_physical_id(numa_cpu);
	}
skip_numa_setup:
#endif
	/*
	 * Otherwise, round-robin interrupt vectors across all the
	 * processors.  (It'd be nice if we could be smarter in the
	 * case of NUMA.)
	 */
	do {
		if (++cpu >= nr_cpu_ids)
			cpu = 0;
	} while (!cpu_online(cpu) || !cpu_isset(cpu, domain));

	return cpu_physical_id(cpu);
#else  /* CONFIG_SMP */
	return cpu_physical_id(smp_processor_id());
#endif
}

static inline unsigned char choose_dmode(void)
{
#ifdef CONFIG_SMP
	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
		return IOSAPIC_LOWEST_PRIORITY;
#endif
	return IOSAPIC_FIXED;
}
/*
 * ACPI can describe IOSAPIC interrupts via static tables and namespace
 * methods.  This provides an interface to register those interrupts and
 * program the IOSAPIC RTE.
 */
int
iosapic_register_intr (unsigned int gsi,
		       unsigned long polarity, unsigned long trigger)
{
	int irq, mask = 1, err;
	unsigned int dest;
	unsigned long flags;
	struct iosapic_rte_info *rte;
	u32 low32;
	unsigned char dmode;

	/*
	 * If this GSI has already been registered (i.e., it's a
	 * shared interrupt, or we lost a race to register it),
	 * don't touch the RTE.
	 */
	spin_lock_irqsave(&iosapic_lock, flags);
	irq = __gsi_to_irq(gsi);
	if (irq > 0) {
		rte = find_rte(irq, gsi);
		if (iosapic_intr_info[irq].count == 0) {
			assign_irq_vector(irq);
			dynamic_irq_init(irq);
		} else if (rte->refcnt != NO_REF_RTE) {
			rte->refcnt++;
			goto unlock_iosapic_lock;
		}
	} else
		irq = create_irq();

	/* If vectors are running out, try to find a sharable vector */
	if (irq < 0) {
		irq = iosapic_find_sharable_irq(trigger, polarity);
		if (irq < 0)
			goto unlock_iosapic_lock;
	}

	raw_spin_lock(&irq_desc[irq].lock);
	dest = get_target_cpu(gsi, irq);
	dmode = choose_dmode();
	err = register_intr(gsi, irq, dmode, polarity, trigger);
	if (err < 0) {
		raw_spin_unlock(&irq_desc[irq].lock);
		irq = err;
		goto unlock_iosapic_lock;
	}

	/*
	 * If the vector is shared and already unmasked for other
	 * interrupt sources, don't mask it.
	 */
	low32 = iosapic_intr_info[irq].low32;
	if (irq_is_shared(irq) && !(low32 & IOSAPIC_MASK))
		mask = 0;
	set_rte(gsi, irq, dest, mask);

	printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n",
	       gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
	       cpu_logical_id(dest), dest, irq_to_vector(irq));

	raw_spin_unlock(&irq_desc[irq].lock);
 unlock_iosapic_lock:
	spin_unlock_irqrestore(&iosapic_lock, flags);
	return irq;
}
void
iosapic_unregister_intr (unsigned int gsi)
{
	unsigned long flags;
	int irq, index;
	struct irq_desc *idesc;
	u32 low32;
	unsigned long trigger, polarity;
	unsigned int dest;
	struct iosapic_rte_info *rte;

	/*
	 * If the irq associated with the gsi is not found,
	 * iosapic_unregister_intr() is unbalanced. We need to check
	 * this again after getting locks.
	 */
	irq = gsi_to_irq(gsi);
	if (irq < 0) {
		printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n",
		       gsi);
		WARN_ON(1);
		return;
	}

	spin_lock_irqsave(&iosapic_lock, flags);
	if ((rte = find_rte(irq, gsi)) == NULL) {
		printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n",
		       gsi);
		WARN_ON(1);
		goto out;
	}

	if (--rte->refcnt > 0)
		goto out;

	idesc = irq_desc + irq;
	rte->refcnt = NO_REF_RTE;

	/* Mask the interrupt */
	low32 = iosapic_intr_info[irq].low32 | IOSAPIC_MASK;
	iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte->rte_index), low32);

	iosapic_intr_info[irq].count--;
	index = find_iosapic(gsi);
	iosapic_lists[index].rtes_inuse--;
	WARN_ON(iosapic_lists[index].rtes_inuse < 0);

	trigger  = iosapic_intr_info[irq].trigger;
	polarity = iosapic_intr_info[irq].polarity;
	dest     = iosapic_intr_info[irq].dest;
	printk(KERN_INFO
	       "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d unregistered\n",
	       gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
	       cpu_logical_id(dest), dest, irq_to_vector(irq));

	if (iosapic_intr_info[irq].count == 0) {
#ifdef CONFIG_SMP
		/* Clear affinity */
		cpumask_setall(idesc->affinity);
#endif
		/* Clear the interrupt information */
		iosapic_intr_info[irq].dest = 0;
		iosapic_intr_info[irq].dmode = 0;
		iosapic_intr_info[irq].polarity = 0;
		iosapic_intr_info[irq].trigger = 0;
		iosapic_intr_info[irq].low32 |= IOSAPIC_MASK;

		/* Destroy and reserve IRQ */
		destroy_and_reserve_irq(irq);
	}
 out:
	spin_unlock_irqrestore(&iosapic_lock, flags);
}
/*
 * ACPI calls this when it finds an entry for a platform interrupt.
 */
int __init
iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
				int iosapic_vector, u16 eid, u16 id,
				unsigned long polarity, unsigned long trigger)
{
	static const char * const name[] = {"unknown", "PMI", "INIT", "CPEI"};
	unsigned char delivery;
	int irq, vector, mask = 0;
	unsigned int dest = ((id << 8) | eid) & 0xffff;

	switch (int_type) {
	case ACPI_INTERRUPT_PMI:
		irq = vector = iosapic_vector;
		bind_irq_vector(irq, vector, CPU_MASK_ALL);
		/*
		 * Since the PMI vector is alloc'd by FW (ACPI), not by the
		 * kernel, we need to make sure the vector is available.
		 */
		iosapic_reassign_vector(irq);
		delivery = IOSAPIC_PMI;
		break;
	case ACPI_INTERRUPT_INIT:
		irq = create_irq();
		if (irq < 0)
			panic("%s: out of interrupt vectors!\n", __func__);
		vector = irq_to_vector(irq);
		delivery = IOSAPIC_INIT;
		break;
	case ACPI_INTERRUPT_CPEI:
		irq = vector = IA64_CPE_VECTOR;
		BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
		delivery = IOSAPIC_FIXED;
		mask = 1;
		break;
	default:
		printk(KERN_ERR "%s: invalid int type 0x%x\n", __func__,
		       int_type);
		return -1;
	}

	register_intr(gsi, irq, delivery, polarity, trigger);

	printk(KERN_INFO
	       "PLATFORM int %s (0x%x): GSI %u (%s, %s) -> CPU %d (0x%04x)"
	       " vector %d\n",
	       int_type < ARRAY_SIZE(name) ? name[int_type] : "unknown",
	       int_type, gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
	       cpu_logical_id(dest), dest, vector);

	set_rte(gsi, irq, dest, mask);
	return vector;
}
/*
 * ACPI calls this when it finds an entry for a legacy ISA IRQ override.
 */
void __devinit
iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
			  unsigned long polarity,
			  unsigned long trigger)
{
	int vector, irq;
	unsigned int dest = cpu_physical_id(smp_processor_id());
	unsigned char dmode;

	irq = vector = isa_irq_to_vector(isa_irq);
	BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
	dmode = choose_dmode();
	register_intr(gsi, irq, dmode, polarity, trigger);

	DBG("ISA: IRQ %u -> GSI %u (%s,%s) -> CPU %d (0x%04x) vector %d\n",
	    isa_irq, gsi, trigger == IOSAPIC_EDGE ? "edge" : "level",
	    polarity == IOSAPIC_POL_HIGH ? "high" : "low",
	    cpu_logical_id(dest), dest, vector);

	set_rte(gsi, irq, dest, 1);
}

void __init
ia64_native_iosapic_pcat_compat_init(void)
{
	if (pcat_compat) {
		/*
		 * Disable the compatibility mode interrupts (8259 style),
		 * needs IN/OUT support enabled.
		 */
		printk(KERN_INFO
		       "%s: Disabling PC-AT compatible 8259 interrupts\n",
		       __func__);
		outb(0xff, 0xA1);
		outb(0xff, 0x21);
	}
}

void __init
iosapic_system_init (int system_pcat_compat)
{
	int irq;

	for (irq = 0; irq < NR_IRQS; ++irq) {
		iosapic_intr_info[irq].low32 = IOSAPIC_MASK;
		/* mark as unused */
		INIT_LIST_HEAD(&iosapic_intr_info[irq].rtes);

		iosapic_intr_info[irq].count = 0;
	}

	pcat_compat = system_pcat_compat;
	if (pcat_compat)
		iosapic_pcat_compat_init();
}
static inline int
iosapic_alloc (void)
{
	int index;

	for (index = 0; index < NR_IOSAPICS; index++)
		if (!iosapic_lists[index].addr)
			return index;

	printk(KERN_WARNING "%s: failed to allocate iosapic\n", __func__);
	return -1;
}

static inline void
iosapic_free (int index)
{
	memset(&iosapic_lists[index], 0, sizeof(iosapic_lists[0]));
}

static inline int
iosapic_check_gsi_range (unsigned int gsi_base, unsigned int ver)
{
	int index;
	unsigned int gsi_end, base, end;

	/* check gsi range */
	gsi_end = gsi_base + ((ver >> 16) & 0xff);
	for (index = 0; index < NR_IOSAPICS; index++) {
		if (!iosapic_lists[index].addr)
			continue;

		base = iosapic_lists[index].gsi_base;
		end  = base + iosapic_lists[index].num_rte - 1;

		if (gsi_end < base || end < gsi_base)
			continue;	/* OK */

		return -EBUSY;
	}
	return 0;
}
int __devinit
iosapic_init (unsigned long phys_addr, unsigned int gsi_base)
{
	int num_rte, err, index;
	unsigned int isa_irq, ver;
	char __iomem *addr;
	unsigned long flags;

	spin_lock_irqsave(&iosapic_lock, flags);
	index = find_iosapic(gsi_base);
	if (index >= 0) {
		spin_unlock_irqrestore(&iosapic_lock, flags);
		return -EBUSY;
	}

	addr = ioremap(phys_addr, 0);
	if (addr == NULL) {
		spin_unlock_irqrestore(&iosapic_lock, flags);
		return -ENOMEM;
	}
	ver = iosapic_version(addr);
	if ((err = iosapic_check_gsi_range(gsi_base, ver))) {
		iounmap(addr);
		spin_unlock_irqrestore(&iosapic_lock, flags);
		return err;
	}

	/*
	 * The MAX_REDIR register holds the highest input pin number
	 * (starting from 0).  We add 1 so that we can use it for the
	 * number of pins (= RTEs)
	 */
	num_rte = ((ver >> 16) & 0xff) + 1;

	index = iosapic_alloc();
	iosapic_lists[index].addr = addr;
	iosapic_lists[index].gsi_base = gsi_base;
	iosapic_lists[index].num_rte = num_rte;
#ifdef CONFIG_NUMA
	iosapic_lists[index].node = MAX_NUMNODES;
#endif
	spin_lock_init(&iosapic_lists[index].lock);
	spin_unlock_irqrestore(&iosapic_lock, flags);

	if ((gsi_base == 0) && pcat_compat) {
		/*
		 * Map the legacy ISA devices into the IOSAPIC data.  Some of
		 * these may get reprogrammed later on with data from the ACPI
		 * Interrupt Source Override table.
		 */
		for (isa_irq = 0; isa_irq < 16; ++isa_irq)
			iosapic_override_isa_irq(isa_irq, isa_irq,
						 IOSAPIC_POL_HIGH,
						 IOSAPIC_EDGE);
	}
	return 0;
}
#ifdef CONFIG_HOTPLUG
int
iosapic_remove (unsigned int gsi_base)
{
	int index, err = 0;
	unsigned long flags;

	spin_lock_irqsave(&iosapic_lock, flags);
	index = find_iosapic(gsi_base);
	if (index < 0) {
		printk(KERN_WARNING "%s: No IOSAPIC for GSI base %u\n",
		       __func__, gsi_base);
		goto out;
	}

	if (iosapic_lists[index].rtes_inuse) {
		err = -EBUSY;
		printk(KERN_WARNING "%s: IOSAPIC for GSI base %u is busy\n",
		       __func__, gsi_base);
		goto out;
	}

	iounmap(iosapic_lists[index].addr);
	iosapic_free(index);
 out:
	spin_unlock_irqrestore(&iosapic_lock, flags);
	return err;
}
#endif /* CONFIG_HOTPLUG */

#ifdef CONFIG_NUMA
void __devinit
map_iosapic_to_node(unsigned int gsi_base, int node)
{
	int index;

	index = find_iosapic(gsi_base);
	if (index < 0) {
		printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
		       __func__, gsi_base);
		return;
	}
	iosapic_lists[index].node = node;
	return;
}
#endif