msi-xlp.c 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572
  1. /*
  2. * Copyright (c) 2003-2012 Broadcom Corporation
  3. * All Rights Reserved
  4. *
  5. * This software is available to you under a choice of one of two
  6. * licenses. You may choose to be licensed under the terms of the GNU
  7. * General Public License (GPL) Version 2, available from the file
  8. * COPYING in the main directory of this source tree, or the Broadcom
  9. * license below:
  10. *
  11. * Redistribution and use in source and binary forms, with or without
  12. * modification, are permitted provided that the following conditions
  13. * are met:
  14. *
  15. * 1. Redistributions of source code must retain the above copyright
  16. * notice, this list of conditions and the following disclaimer.
  17. * 2. Redistributions in binary form must reproduce the above copyright
  18. * notice, this list of conditions and the following disclaimer in
  19. * the documentation and/or other materials provided with the
  20. * distribution.
  21. *
  22. * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
  23. * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  24. * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  25. * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
  26. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  27. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  28. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  29. * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  30. * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
  31. * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
  32. * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  33. */
  34. #include <linux/types.h>
  35. #include <linux/pci.h>
  36. #include <linux/kernel.h>
  37. #include <linux/init.h>
  38. #include <linux/msi.h>
  39. #include <linux/mm.h>
  40. #include <linux/irq.h>
  41. #include <linux/irqdesc.h>
  42. #include <linux/console.h>
  43. #include <asm/io.h>
  44. #include <asm/netlogic/interrupt.h>
  45. #include <asm/netlogic/haldefs.h>
  46. #include <asm/netlogic/common.h>
  47. #include <asm/netlogic/mips-extns.h>
  48. #include <asm/netlogic/xlp-hal/iomap.h>
  49. #include <asm/netlogic/xlp-hal/xlp.h>
  50. #include <asm/netlogic/xlp-hal/pic.h>
  51. #include <asm/netlogic/xlp-hal/pcibus.h>
  52. #include <asm/netlogic/xlp-hal/bridge.h>
  53. #define XLP_MSIVEC_PER_LINK 32
  54. #define XLP_MSIXVEC_TOTAL (cpu_is_xlp9xx() ? 128 : 32)
  55. #define XLP_MSIXVEC_PER_LINK (cpu_is_xlp9xx() ? 32 : 8)
  56. /* 128 MSI irqs per node, mapped starting at NLM_MSI_VEC_BASE */
  57. static inline int nlm_link_msiirq(int link, int msivec)
  58. {
  59. return NLM_MSI_VEC_BASE + link * XLP_MSIVEC_PER_LINK + msivec;
  60. }
  61. /* get the link MSI vector from irq number */
  62. static inline int nlm_irq_msivec(int irq)
  63. {
  64. return (irq - NLM_MSI_VEC_BASE) % XLP_MSIVEC_PER_LINK;
  65. }
  66. /* get the link from the irq number */
  67. static inline int nlm_irq_msilink(int irq)
  68. {
  69. int total_msivec = XLP_MSIVEC_PER_LINK * PCIE_NLINKS;
  70. return ((irq - NLM_MSI_VEC_BASE) % total_msivec) /
  71. XLP_MSIVEC_PER_LINK;
  72. }
  73. /*
  74. * For XLP 8xx/4xx/3xx/2xx, only 32 MSI-X vectors are possible because
  75. * there are only 32 PIC interrupts for MSI. We split them statically
  76. * and use 8 MSI-X vectors per link - this keeps the allocation and
  77. * lookup simple.
  78. * On XLP 9xx, there are 32 vectors per link, and the interrupts are
  79. * not routed thru PIC, so we can use all 128 MSI-X vectors.
  80. */
  81. static inline int nlm_link_msixirq(int link, int bit)
  82. {
  83. return NLM_MSIX_VEC_BASE + link * XLP_MSIXVEC_PER_LINK + bit;
  84. }
  85. /* get the link MSI vector from irq number */
  86. static inline int nlm_irq_msixvec(int irq)
  87. {
  88. return (irq - NLM_MSIX_VEC_BASE) % XLP_MSIXVEC_TOTAL;
  89. }
  90. /* get the link from MSIX vec */
  91. static inline int nlm_irq_msixlink(int msixvec)
  92. {
  93. return msixvec / XLP_MSIXVEC_PER_LINK;
  94. }
/*
 * Per link MSI and MSI-X information, set as IRQ handler data for
 * MSI and MSI-X interrupts.
 */
struct xlp_msi_data {
	struct nlm_soc_info *node;	/* SoC node this link belongs to */
	uint64_t lnkbase;		/* register base of the PCIe link */
	uint32_t msi_enabled_mask;	/* MSI vectors currently enabled */
	uint32_t msi_alloc_mask;	/* MSI vectors handed out so far */
	uint32_t msix_alloc_mask;	/* MSI-X vectors handed out so far */
	spinlock_t msi_lock;		/* protects the three masks above */
};
  107. /*
  108. * MSI Chip definitions
  109. *
  110. * On XLP, there is a PIC interrupt associated with each PCIe link on the
  111. * chip (which appears as a PCI bridge to us). This gives us 32 MSI irqa
  112. * per link and 128 overall.
  113. *
  114. * When a device connected to the link raises a MSI interrupt, we get a
  115. * link interrupt and we then have to look at PCIE_MSI_STATUS register at
  116. * the bridge to map it to the IRQ
  117. */
  118. static void xlp_msi_enable(struct irq_data *d)
  119. {
  120. struct xlp_msi_data *md = irq_data_get_irq_handler_data(d);
  121. unsigned long flags;
  122. int vec;
  123. vec = nlm_irq_msivec(d->irq);
  124. spin_lock_irqsave(&md->msi_lock, flags);
  125. md->msi_enabled_mask |= 1u << vec;
  126. if (cpu_is_xlp9xx())
  127. nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_EN,
  128. md->msi_enabled_mask);
  129. else
  130. nlm_write_reg(md->lnkbase, PCIE_MSI_EN, md->msi_enabled_mask);
  131. spin_unlock_irqrestore(&md->msi_lock, flags);
  132. }
  133. static void xlp_msi_disable(struct irq_data *d)
  134. {
  135. struct xlp_msi_data *md = irq_data_get_irq_handler_data(d);
  136. unsigned long flags;
  137. int vec;
  138. vec = nlm_irq_msivec(d->irq);
  139. spin_lock_irqsave(&md->msi_lock, flags);
  140. md->msi_enabled_mask &= ~(1u << vec);
  141. if (cpu_is_xlp9xx())
  142. nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_EN,
  143. md->msi_enabled_mask);
  144. else
  145. nlm_write_reg(md->lnkbase, PCIE_MSI_EN, md->msi_enabled_mask);
  146. spin_unlock_irqrestore(&md->msi_lock, flags);
  147. }
  148. static void xlp_msi_mask_ack(struct irq_data *d)
  149. {
  150. struct xlp_msi_data *md = irq_data_get_irq_handler_data(d);
  151. int link, vec;
  152. link = nlm_irq_msilink(d->irq);
  153. vec = nlm_irq_msivec(d->irq);
  154. xlp_msi_disable(d);
  155. /* Ack MSI on bridge */
  156. if (cpu_is_xlp9xx())
  157. nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_STATUS, 1u << vec);
  158. else
  159. nlm_write_reg(md->lnkbase, PCIE_MSI_STATUS, 1u << vec);
  160. /* Ack at eirr and PIC */
  161. ack_c0_eirr(PIC_PCIE_LINK_MSI_IRQ(link));
  162. if (cpu_is_xlp9xx())
  163. nlm_pic_ack(md->node->picbase,
  164. PIC_9XX_IRT_PCIE_LINK_INDEX(link));
  165. else
  166. nlm_pic_ack(md->node->picbase, PIC_IRT_PCIE_LINK_INDEX(link));
  167. }
/* irq_chip for the per-link MSI irq range; unmask == enable */
static struct irq_chip xlp_msi_chip = {
	.name = "XLP-MSI",
	.irq_enable = xlp_msi_enable,
	.irq_disable = xlp_msi_disable,
	.irq_mask_ack = xlp_msi_mask_ack,
	.irq_unmask = xlp_msi_enable,
};
  175. /*
  176. * XLP8XX/4XX/3XX/2XX:
  177. * The MSI-X interrupt handling is different from MSI, there are 32 MSI-X
  178. * interrupts generated by the PIC and each of these correspond to a MSI-X
  179. * vector (0-31) that can be assigned.
  180. *
  181. * We divide the MSI-X vectors to 8 per link and do a per-link allocation
  182. *
  183. * XLP9XX:
  184. * 32 MSI-X vectors are available per link, and the interrupts are not routed
  185. * thru the PIC. PIC ack not needed.
  186. *
  187. * Enable and disable done using standard MSI functions.
  188. */
  189. static void xlp_msix_mask_ack(struct irq_data *d)
  190. {
  191. struct xlp_msi_data *md;
  192. int link, msixvec;
  193. uint32_t status_reg, bit;
  194. msixvec = nlm_irq_msixvec(d->irq);
  195. link = nlm_irq_msixlink(msixvec);
  196. mask_msi_irq(d);
  197. md = irq_data_get_irq_handler_data(d);
  198. /* Ack MSI on bridge */
  199. if (cpu_is_xlp9xx()) {
  200. status_reg = PCIE_9XX_MSIX_STATUSX(link);
  201. bit = msixvec % XLP_MSIXVEC_PER_LINK;
  202. } else {
  203. status_reg = PCIE_MSIX_STATUS;
  204. bit = msixvec;
  205. }
  206. nlm_write_reg(md->lnkbase, status_reg, 1u << bit);
  207. /* Ack at eirr and PIC */
  208. ack_c0_eirr(PIC_PCIE_MSIX_IRQ(link));
  209. if (!cpu_is_xlp9xx())
  210. nlm_pic_ack(md->node->picbase,
  211. PIC_IRT_PCIE_MSIX_INDEX(msixvec));
  212. }
/* irq_chip for the MSI-X irq range; enable/disable via generic MSI ops */
static struct irq_chip xlp_msix_chip = {
	.name = "XLP-MSIX",
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask_ack = xlp_msix_mask_ack,
	.irq_unmask = unmask_msi_irq,
};
/*
 * Arch hook called when an MSI irq is torn down.  Intentionally empty:
 * nothing in this file returns vectors to the per-link allocation
 * masks, so there is no per-irq state to undo here.
 */
void arch_teardown_msi_irq(unsigned int irq)
{
}
/*
 * Setup a PCIe link for MSI. By default, the links are in
 * legacy interrupt mode. We will switch them to MSI mode
 * at the first MSI request.
 *
 * @lnkbase: register base of the PCIe link (bridge)
 * @lirq:    PIC irq the link raises for MSI
 * @msiaddr: address devices write to in order to signal an MSI
 */
static void xlp_config_link_msi(uint64_t lnkbase, int lirq, uint64_t msiaddr)
{
	u32 val;

	/* Set the MSI interrupt enable bit (0x200) in the link's INT_EN0 */
	if (cpu_is_xlp9xx()) {
		val = nlm_read_reg(lnkbase, PCIE_9XX_INT_EN0);
		if ((val & 0x200) == 0) {
			val |= 0x200;	/* MSI Interrupt enable */
			nlm_write_reg(lnkbase, PCIE_9XX_INT_EN0, val);
		}
	} else {
		val = nlm_read_reg(lnkbase, PCIE_INT_EN0);
		if ((val & 0x200) == 0) {
			val |= 0x200;
			nlm_write_reg(lnkbase, PCIE_INT_EN0, val);
		}
	}

	/*
	 * Reg 0x1 is config dword 1 (command/status).  NOTE(review):
	 * 0x0400 in the command word matches PCI_COMMAND_INTX_DISABLE,
	 * presumably turning off legacy INTx once MSI takes over --
	 * confirm against the XLP PCIe bridge documentation.
	 */
	val = nlm_read_reg(lnkbase, 0x1); /* CMD */
	if ((val & 0x0400) == 0) {
		val |= 0x0400;
		nlm_write_reg(lnkbase, 0x1, val);
	}

	/* Update IRQ in the PCI irq reg (config dword 0xf: int line/pin) */
	val = nlm_read_pci_reg(lnkbase, 0xf);
	val &= ~0x1fu;			/* clear the interrupt-line field */
	val |= (1 << 8) | lirq;		/* set pin field and the link irq */
	nlm_write_pci_reg(lnkbase, 0xf, val);

	/* MSI addr */
	nlm_write_reg(lnkbase, PCIE_BRIDGE_MSI_ADDRH, msiaddr >> 32);
	nlm_write_reg(lnkbase, PCIE_BRIDGE_MSI_ADDRL, msiaddr & 0xffffffff);

	/* MSI cap for bridge: only touch if the enable bit is clear */
	val = nlm_read_reg(lnkbase, PCIE_BRIDGE_MSI_CAP);
	if ((val & (1 << 16)) == 0) {
		val |= 0xb << 16;	/* mmc32, msi enable */
		nlm_write_reg(lnkbase, PCIE_BRIDGE_MSI_CAP, val);
	}
}
  264. /*
  265. * Allocate a MSI vector on a link
  266. */
  267. static int xlp_setup_msi(uint64_t lnkbase, int node, int link,
  268. struct msi_desc *desc)
  269. {
  270. struct xlp_msi_data *md;
  271. struct msi_msg msg;
  272. unsigned long flags;
  273. int msivec, irt, lirq, xirq, ret;
  274. uint64_t msiaddr;
  275. /* Get MSI data for the link */
  276. lirq = PIC_PCIE_LINK_MSI_IRQ(link);
  277. xirq = nlm_irq_to_xirq(node, nlm_link_msiirq(link, 0));
  278. md = irq_get_handler_data(xirq);
  279. msiaddr = MSI_LINK_ADDR(node, link);
  280. spin_lock_irqsave(&md->msi_lock, flags);
  281. if (md->msi_alloc_mask == 0) {
  282. xlp_config_link_msi(lnkbase, lirq, msiaddr);
  283. /* switch the link IRQ to MSI range */
  284. if (cpu_is_xlp9xx())
  285. irt = PIC_9XX_IRT_PCIE_LINK_INDEX(link);
  286. else
  287. irt = PIC_IRT_PCIE_LINK_INDEX(link);
  288. nlm_setup_pic_irq(node, lirq, lirq, irt);
  289. nlm_pic_init_irt(nlm_get_node(node)->picbase, irt, lirq,
  290. node * nlm_threads_per_node(), 1 /*en */);
  291. }
  292. /* allocate a MSI vec, and tell the bridge about it */
  293. msivec = fls(md->msi_alloc_mask);
  294. if (msivec == XLP_MSIVEC_PER_LINK) {
  295. spin_unlock_irqrestore(&md->msi_lock, flags);
  296. return -ENOMEM;
  297. }
  298. md->msi_alloc_mask |= (1u << msivec);
  299. spin_unlock_irqrestore(&md->msi_lock, flags);
  300. msg.address_hi = msiaddr >> 32;
  301. msg.address_lo = msiaddr & 0xffffffff;
  302. msg.data = 0xc00 | msivec;
  303. xirq = xirq + msivec; /* msi mapped to global irq space */
  304. ret = irq_set_msi_desc(xirq, desc);
  305. if (ret < 0)
  306. return ret;
  307. write_msi_msg(xirq, &msg);
  308. return 0;
  309. }
/*
 * Switch a link to MSI-X mode
 *
 * @lnkbase:  register base of the PCIe link (bridge)
 * @lirq:     PIC irq the link raises for MSI-X
 * @msixaddr: base of the MSI-X address window for this link
 */
static void xlp_config_link_msix(uint64_t lnkbase, int lirq, uint64_t msixaddr)
{
	u32 val;

	/*
	 * NOTE(review): reg 0x2C bit 31 presumably enables MSI-X on the
	 * bridge -- confirm against the XLP PCIe bridge documentation.
	 */
	val = nlm_read_reg(lnkbase, 0x2C);
	if ((val & 0x80000000U) == 0) {
		val |= 0x80000000U;
		nlm_write_reg(lnkbase, 0x2C, val);
	}

	/* Set the MSI interrupt enable bit (0x200) in the link's INT_EN0 */
	if (cpu_is_xlp9xx()) {
		val = nlm_read_reg(lnkbase, PCIE_9XX_INT_EN0);
		if ((val & 0x200) == 0) {
			val |= 0x200;	/* MSI Interrupt enable */
			nlm_write_reg(lnkbase, PCIE_9XX_INT_EN0, val);
		}
	} else {
		val = nlm_read_reg(lnkbase, PCIE_INT_EN0);
		if ((val & 0x200) == 0) {
			val |= 0x200;	/* MSI Interrupt enable */
			nlm_write_reg(lnkbase, PCIE_INT_EN0, val);
		}
	}

	/*
	 * Reg 0x1 is config dword 1 (command/status); 0x0400 matches
	 * PCI_COMMAND_INTX_DISABLE -- NOTE(review): confirm intent.
	 */
	val = nlm_read_reg(lnkbase, 0x1); /* CMD */
	if ((val & 0x0400) == 0) {
		val |= 0x0400;
		nlm_write_reg(lnkbase, 0x1, val);
	}

	/* Update IRQ in the PCI irq reg (config dword 0xf: int line/pin) */
	val = nlm_read_pci_reg(lnkbase, 0xf);
	val &= ~0x1fu;			/* clear the interrupt-line field */
	val |= (1 << 8) | lirq;		/* set pin field and the link irq */
	nlm_write_pci_reg(lnkbase, 0xf, val);

	/* program the MSI-X address window; registers take addr >> 8 */
	if (cpu_is_xlp9xx()) {
		/* MSI-X addresses */
		nlm_write_reg(lnkbase, PCIE_9XX_BRIDGE_MSIX_ADDR_BASE,
				msixaddr >> 8);
		nlm_write_reg(lnkbase, PCIE_9XX_BRIDGE_MSIX_ADDR_LIMIT,
				(msixaddr + MSI_ADDR_SZ) >> 8);
	} else {
		/* MSI-X addresses */
		nlm_write_reg(lnkbase, PCIE_BRIDGE_MSIX_ADDR_BASE,
				msixaddr >> 8);
		nlm_write_reg(lnkbase, PCIE_BRIDGE_MSIX_ADDR_LIMIT,
				(msixaddr + MSI_ADDR_SZ) >> 8);
	}
}
  358. /*
  359. * Allocate a MSI-X vector
  360. */
  361. static int xlp_setup_msix(uint64_t lnkbase, int node, int link,
  362. struct msi_desc *desc)
  363. {
  364. struct xlp_msi_data *md;
  365. struct msi_msg msg;
  366. unsigned long flags;
  367. int t, msixvec, lirq, xirq, ret;
  368. uint64_t msixaddr;
  369. /* Get MSI data for the link */
  370. lirq = PIC_PCIE_MSIX_IRQ(link);
  371. xirq = nlm_irq_to_xirq(node, nlm_link_msixirq(link, 0));
  372. md = irq_get_handler_data(xirq);
  373. msixaddr = MSIX_LINK_ADDR(node, link);
  374. spin_lock_irqsave(&md->msi_lock, flags);
  375. /* switch the PCIe link to MSI-X mode at the first alloc */
  376. if (md->msix_alloc_mask == 0)
  377. xlp_config_link_msix(lnkbase, lirq, msixaddr);
  378. /* allocate a MSI-X vec, and tell the bridge about it */
  379. t = fls(md->msix_alloc_mask);
  380. if (t == XLP_MSIXVEC_PER_LINK) {
  381. spin_unlock_irqrestore(&md->msi_lock, flags);
  382. return -ENOMEM;
  383. }
  384. md->msix_alloc_mask |= (1u << t);
  385. spin_unlock_irqrestore(&md->msi_lock, flags);
  386. xirq += t;
  387. msixvec = nlm_irq_msixvec(xirq);
  388. msg.address_hi = msixaddr >> 32;
  389. msg.address_lo = msixaddr & 0xffffffff;
  390. msg.data = 0xc00 | msixvec;
  391. ret = irq_set_msi_desc(xirq, desc);
  392. if (ret < 0) {
  393. destroy_irq(xirq);
  394. return ret;
  395. }
  396. write_msi_msg(xirq, &msg);
  397. return 0;
  398. }
  399. int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
  400. {
  401. struct pci_dev *lnkdev;
  402. uint64_t lnkbase;
  403. int node, link, slot;
  404. lnkdev = xlp_get_pcie_link(dev);
  405. if (lnkdev == NULL) {
  406. dev_err(&dev->dev, "Could not find bridge\n");
  407. return 1;
  408. }
  409. slot = PCI_SLOT(lnkdev->devfn);
  410. link = PCI_FUNC(lnkdev->devfn);
  411. node = slot / 8;
  412. lnkbase = nlm_get_pcie_base(node, link);
  413. if (desc->msi_attrib.is_msix)
  414. return xlp_setup_msix(lnkbase, node, link, desc);
  415. else
  416. return xlp_setup_msi(lnkbase, node, link, desc);
  417. }
  418. void __init xlp_init_node_msi_irqs(int node, int link)
  419. {
  420. struct nlm_soc_info *nodep;
  421. struct xlp_msi_data *md;
  422. int irq, i, irt, msixvec, val;
  423. pr_info("[%d %d] Init node PCI IRT\n", node, link);
  424. nodep = nlm_get_node(node);
  425. /* Alloc an MSI block for the link */
  426. md = kzalloc(sizeof(*md), GFP_KERNEL);
  427. spin_lock_init(&md->msi_lock);
  428. md->msi_enabled_mask = 0;
  429. md->msi_alloc_mask = 0;
  430. md->msix_alloc_mask = 0;
  431. md->node = nodep;
  432. md->lnkbase = nlm_get_pcie_base(node, link);
  433. /* extended space for MSI interrupts */
  434. irq = nlm_irq_to_xirq(node, nlm_link_msiirq(link, 0));
  435. for (i = irq; i < irq + XLP_MSIVEC_PER_LINK; i++) {
  436. irq_set_chip_and_handler(i, &xlp_msi_chip, handle_level_irq);
  437. irq_set_handler_data(i, md);
  438. }
  439. for (i = 0; i < XLP_MSIXVEC_PER_LINK ; i++) {
  440. if (cpu_is_xlp9xx()) {
  441. val = ((node * nlm_threads_per_node()) << 7 |
  442. PIC_PCIE_MSIX_IRQ(link) << 1 | 0 << 0);
  443. nlm_write_pcie_reg(md->lnkbase, PCIE_9XX_MSIX_VECX(i +
  444. (link * XLP_MSIXVEC_PER_LINK)), val);
  445. } else {
  446. /* Initialize MSI-X irts to generate one interrupt
  447. * per link
  448. */
  449. msixvec = link * XLP_MSIXVEC_PER_LINK + i;
  450. irt = PIC_IRT_PCIE_MSIX_INDEX(msixvec);
  451. nlm_pic_init_irt(nodep->picbase, irt,
  452. PIC_PCIE_MSIX_IRQ(link),
  453. node * nlm_threads_per_node(), 1);
  454. }
  455. /* Initialize MSI-X extended irq space for the link */
  456. irq = nlm_irq_to_xirq(node, nlm_link_msixirq(link, i));
  457. irq_set_chip_and_handler(irq, &xlp_msix_chip, handle_level_irq);
  458. irq_set_handler_data(irq, md);
  459. }
  460. }
  461. void nlm_dispatch_msi(int node, int lirq)
  462. {
  463. struct xlp_msi_data *md;
  464. int link, i, irqbase;
  465. u32 status;
  466. link = lirq - PIC_PCIE_LINK_MSI_IRQ_BASE;
  467. irqbase = nlm_irq_to_xirq(node, nlm_link_msiirq(link, 0));
  468. md = irq_get_handler_data(irqbase);
  469. if (cpu_is_xlp9xx())
  470. status = nlm_read_reg(md->lnkbase, PCIE_9XX_MSI_STATUS) &
  471. md->msi_enabled_mask;
  472. else
  473. status = nlm_read_reg(md->lnkbase, PCIE_MSI_STATUS) &
  474. md->msi_enabled_mask;
  475. while (status) {
  476. i = __ffs(status);
  477. do_IRQ(irqbase + i);
  478. status &= status - 1;
  479. }
  480. }
  481. void nlm_dispatch_msix(int node, int lirq)
  482. {
  483. struct xlp_msi_data *md;
  484. int link, i, irqbase;
  485. u32 status;
  486. link = lirq - PIC_PCIE_MSIX_IRQ_BASE;
  487. irqbase = nlm_irq_to_xirq(node, nlm_link_msixirq(link, 0));
  488. md = irq_get_handler_data(irqbase);
  489. if (cpu_is_xlp9xx())
  490. status = nlm_read_reg(md->lnkbase, PCIE_9XX_MSIX_STATUSX(link));
  491. else
  492. status = nlm_read_reg(md->lnkbase, PCIE_MSIX_STATUS);
  493. /* narrow it down to the MSI-x vectors for our link */
  494. if (!cpu_is_xlp9xx())
  495. status = (status >> (link * XLP_MSIXVEC_PER_LINK)) &
  496. ((1 << XLP_MSIXVEC_PER_LINK) - 1);
  497. while (status) {
  498. i = __ffs(status);
  499. do_IRQ(irqbase + i);
  500. status &= status - 1;
  501. }
  502. }