pcie-iproc-msi.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Broadcom Corporation
 */

#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>

#include "pcie-iproc.h"

#define IPROC_MSI_INTR_EN_SHIFT 11
#define IPROC_MSI_INTR_EN BIT(IPROC_MSI_INTR_EN_SHIFT)
#define IPROC_MSI_INT_N_EVENT_SHIFT 1
#define IPROC_MSI_INT_N_EVENT BIT(IPROC_MSI_INT_N_EVENT_SHIFT)
#define IPROC_MSI_EQ_EN_SHIFT 0
#define IPROC_MSI_EQ_EN BIT(IPROC_MSI_EQ_EN_SHIFT)

#define IPROC_MSI_EQ_MASK 0x3f

/* Max number of GIC interrupts */
#define NR_HW_IRQS 6

/* Number of entries in each event queue */
#define EQ_LEN 64

/* Size of each event queue memory region */
#define EQ_MEM_REGION_SIZE SZ_4K

/* Size of each MSI address region */
#define MSI_MEM_REGION_SIZE SZ_4K

enum iproc_msi_reg {
        IPROC_MSI_EQ_PAGE = 0,
        IPROC_MSI_EQ_PAGE_UPPER,
        IPROC_MSI_PAGE,
        IPROC_MSI_PAGE_UPPER,
        IPROC_MSI_CTRL,
        IPROC_MSI_EQ_HEAD,
        IPROC_MSI_EQ_TAIL,
        IPROC_MSI_INTS_EN,
        IPROC_MSI_REG_SIZE,
};

struct iproc_msi;

/**
 * iProc MSI group
 *
 * One MSI group is allocated per GIC interrupt, serviced by one iProc MSI
 * event queue.
 *
 * @msi: pointer to iProc MSI data
 * @gic_irq: GIC interrupt
 * @eq: Event queue number
 */
struct iproc_msi_grp {
        struct iproc_msi *msi;
        int gic_irq;
        unsigned int eq;
};

/**
 * iProc event queue based MSI
 *
 * Only meant to be used on platforms without MSI support integrated into the
 * GIC.
 *
 * @pcie: pointer to iProc PCIe data
 * @reg_offsets: MSI register offsets
 * @grps: MSI groups
 * @nr_irqs: total number of interrupts connected to the GIC
 * @nr_cpus: total number of CPUs
 * @has_inten_reg: indicates the MSI interrupt enable register needs to be
 * set explicitly (required for some legacy platforms)
 * @bitmap: MSI vector bitmap
 * @bitmap_lock: lock to protect access to the MSI bitmap
 * @nr_msi_vecs: total number of MSI vectors
 * @inner_domain: inner IRQ domain
 * @msi_domain: MSI IRQ domain
 * @nr_eq_region: required number of 4K aligned memory regions for MSI event
 * queues
 * @nr_msi_region: required number of 4K aligned address regions for MSI
 * posted writes
 * @eq_cpu: pointer to allocated memory region for MSI event queues
 * @eq_dma: DMA address of MSI event queues
 * @msi_addr: MSI address
 */
struct iproc_msi {
        struct iproc_pcie *pcie;
        const u16 (*reg_offsets)[IPROC_MSI_REG_SIZE];
        struct iproc_msi_grp *grps;
        int nr_irqs;
        int nr_cpus;
        bool has_inten_reg;
        unsigned long *bitmap;
        struct mutex bitmap_lock;
        unsigned int nr_msi_vecs;
        struct irq_domain *inner_domain;
        struct irq_domain *msi_domain;
        unsigned int nr_eq_region;
        unsigned int nr_msi_region;
        void *eq_cpu;
        dma_addr_t eq_dma;
        phys_addr_t msi_addr;
};
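
/*
 * Register offset tables: one row per event queue (GIC interrupt), with the
 * columns indexed by enum iproc_msi_reg. Note the PAXC table leaves the
 * final IPROC_MSI_INTS_EN slot unpopulated.
 */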
static const u16 iproc_msi_reg_paxb[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = {
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x210, 0x250, 0x254, 0x208 },
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x214, 0x258, 0x25c, 0x208 },
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x218, 0x260, 0x264, 0x208 },
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x21c, 0x268, 0x26c, 0x208 },
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x220, 0x270, 0x274, 0x208 },
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x224, 0x278, 0x27c, 0x208 },
};

static const u16 iproc_msi_reg_paxc[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = {
        { 0xc00, 0xc04, 0xc08, 0xc0c, 0xc40, 0xc50, 0xc60 },
        { 0xc10, 0xc14, 0xc18, 0xc1c, 0xc44, 0xc54, 0xc64 },
        { 0xc20, 0xc24, 0xc28, 0xc2c, 0xc48, 0xc58, 0xc68 },
        { 0xc30, 0xc34, 0xc38, 0xc3c, 0xc4c, 0xc5c, 0xc6c },
};

static inline u32 iproc_msi_read_reg(struct iproc_msi *msi,
                enum iproc_msi_reg reg,
                unsigned int eq)
{
        struct iproc_pcie *pcie = msi->pcie;

        return readl_relaxed(pcie->base + msi->reg_offsets[eq][reg]);
}

static inline void iproc_msi_write_reg(struct iproc_msi *msi,
                enum iproc_msi_reg reg,
                int eq, u32 val)
{
        struct iproc_pcie *pcie = msi->pcie;

        writel_relaxed(val, pcie->base + msi->reg_offsets[eq][reg]);
}

static inline u32 hwirq_to_group(struct iproc_msi *msi, unsigned long hwirq)
{
        return (hwirq % msi->nr_irqs);
}
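
/*
 * PAXB exposes a single MSI address region and a single event queue region
 * shared by all groups, whereas PAXC uses one 4K region per group (see the
 * nr_msi_region/nr_eq_region assignments in iproc_msi_init()); the helpers
 * below compute the per-group offset for either layout.
 */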
static inline unsigned int iproc_msi_addr_offset(struct iproc_msi *msi,
                unsigned long hwirq)
{
        if (msi->nr_msi_region > 1)
                return hwirq_to_group(msi, hwirq) * MSI_MEM_REGION_SIZE;
        else
                return hwirq_to_group(msi, hwirq) * sizeof(u32);
}

static inline unsigned int iproc_msi_eq_offset(struct iproc_msi *msi, u32 eq)
{
        if (msi->nr_eq_region > 1)
                return eq * EQ_MEM_REGION_SIZE;
        else
                return eq * EQ_LEN * sizeof(u32);
}

static struct irq_chip iproc_msi_irq_chip = {
        .name = "iProc-MSI",
};

static struct msi_domain_info iproc_msi_domain_info = {
        .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
        .chip = &iproc_msi_irq_chip,
};

/*
 * In the iProc PCIe core, each MSI group is serviced by a GIC interrupt and a
 * dedicated event queue. Each MSI group can support up to 64 MSI vectors.
 *
 * The number of MSI groups varies between different iProc SoCs. The total
 * number of CPU cores also varies. To support MSI IRQ affinity, we
 * distribute GIC interrupts across all available CPUs. An MSI vector is moved
 * from one GIC interrupt to another to steer it to the target CPU.
 *
 * Assuming:
 * - the number of MSI groups is M
 * - the number of CPU cores is N
 * - M is always a multiple of N
 *
 * Total number of raw MSI vectors = M * 64
 * Total number of supported MSI vectors = (M * 64) / N
 */
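
/*
 * For example, on a SoC with M = 6 MSI groups and N = 2 CPUs, there are
 * 6 * 64 = 384 raw MSI vectors, of which 384 / 2 = 192 can be handed out to
 * devices; each vector handed out reserves one raw vector per CPU so that it
 * can later be steered to either core.
 */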
static inline int hwirq_to_cpu(struct iproc_msi *msi, unsigned long hwirq)
{
        return (hwirq % msi->nr_cpus);
}

static inline unsigned long hwirq_to_canonical_hwirq(struct iproc_msi *msi,
                unsigned long hwirq)
{
        return (hwirq - hwirq_to_cpu(msi, hwirq));
}

static int iproc_msi_irq_set_affinity(struct irq_data *data,
                const struct cpumask *mask, bool force)
{
        struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
        int target_cpu = cpumask_first(mask);
        int curr_cpu;

        curr_cpu = hwirq_to_cpu(msi, data->hwirq);
        if (curr_cpu == target_cpu)
                return IRQ_SET_MASK_OK_DONE;

        /* steer MSI to the target CPU */
        data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;

        return IRQ_SET_MASK_OK;
}

static void iproc_msi_irq_compose_msi_msg(struct irq_data *data,
                struct msi_msg *msg)
{
        struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
        dma_addr_t addr;

        addr = msi->msi_addr + iproc_msi_addr_offset(msi, data->hwirq);
        msg->address_lo = lower_32_bits(addr);
        msg->address_hi = upper_32_bits(addr);
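        /*
         * The hwirq is shifted left by 5 bits to form the MSI data, leaving
         * the low 5 bits free; decode_msi_hwirq() adds those low bits back
         * in when the message is read from the event queue, presumably so a
         * multi-MSI device can encode its vector offset there.
         */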
        msg->data = data->hwirq << 5;
}

static struct irq_chip iproc_msi_bottom_irq_chip = {
        .name = "MSI",
        .irq_set_affinity = iproc_msi_irq_set_affinity,
        .irq_compose_msi_msg = iproc_msi_irq_compose_msi_msg,
};

static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
                unsigned int virq, unsigned int nr_irqs,
                void *args)
{
        struct iproc_msi *msi = domain->host_data;
        int hwirq, i;

        mutex_lock(&msi->bitmap_lock);

        /* Allocate 'nr_cpus' number of MSI vectors each time */
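        /*
         * Reserving one raw vector per CPU for every allocation is what lets
         * iproc_msi_irq_set_affinity() later steer the interrupt simply by
         * switching to the equivalent hwirq owned by the target CPU's group.
         */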
        hwirq = bitmap_find_next_zero_area(msi->bitmap, msi->nr_msi_vecs, 0,
                        msi->nr_cpus, 0);
        if (hwirq < msi->nr_msi_vecs) {
                bitmap_set(msi->bitmap, hwirq, msi->nr_cpus);
        } else {
                mutex_unlock(&msi->bitmap_lock);
                return -ENOSPC;
        }

        mutex_unlock(&msi->bitmap_lock);

        for (i = 0; i < nr_irqs; i++) {
                irq_domain_set_info(domain, virq + i, hwirq + i,
                                &iproc_msi_bottom_irq_chip,
                                domain->host_data, handle_simple_irq,
                                NULL, NULL);
        }

        return hwirq;
}

static void iproc_msi_irq_domain_free(struct irq_domain *domain,
                unsigned int virq, unsigned int nr_irqs)
{
        struct irq_data *data = irq_domain_get_irq_data(domain, virq);
        struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
        unsigned int hwirq;

        mutex_lock(&msi->bitmap_lock);

        hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq);
        bitmap_clear(msi->bitmap, hwirq, msi->nr_cpus);

        mutex_unlock(&msi->bitmap_lock);

        irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops msi_domain_ops = {
        .alloc = iproc_msi_irq_domain_alloc,
        .free = iproc_msi_irq_domain_free,
};

static inline u32 decode_msi_hwirq(struct iproc_msi *msi, u32 eq, u32 head)
{
        u32 *msg, hwirq;
        unsigned int offs;

        offs = iproc_msi_eq_offset(msi, eq) + head * sizeof(u32);
        msg = (u32 *)(msi->eq_cpu + offs);
        hwirq = readl(msg);
        hwirq = (hwirq >> 5) + (hwirq & 0x1f);

        /*
         * Since we have multiple hwirqs mapped to a single MSI vector,
         * now we need to derive the hwirq at CPU0. It can then be used to
         * map back to the virq.
         */
        return hwirq_to_canonical_hwirq(msi, hwirq);
}

static void iproc_msi_handler(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct iproc_msi_grp *grp;
        struct iproc_msi *msi;
        u32 eq, head, tail, nr_events;
        unsigned long hwirq;
        int virq;

        chained_irq_enter(chip, desc);

        grp = irq_desc_get_handler_data(desc);
        msi = grp->msi;
        eq = grp->eq;

        /*
         * The iProc MSI event queue is tracked by head and tail pointers. The
         * head pointer indicates the next entry (MSI data) to be consumed by
         * SW in the queue and needs to be updated by SW. The iProc MSI core
         * uses the tail pointer as the next data insertion point.
         *
         * Entries between the head and tail pointers contain valid MSI data.
         * MSI data is guaranteed to be in the event queue memory before the
         * tail pointer is updated by the iProc MSI core.
         */
        head = iproc_msi_read_reg(msi, IPROC_MSI_EQ_HEAD,
                        eq) & IPROC_MSI_EQ_MASK;
        do {
                tail = iproc_msi_read_reg(msi, IPROC_MSI_EQ_TAIL,
                                eq) & IPROC_MSI_EQ_MASK;

                /*
                 * Figure out the total number of events (MSI data) to be
                 * processed, accounting for wrap-around of the circular
                 * queue when the tail is behind the head.
                 */
                nr_events = (tail < head) ?
                        (EQ_LEN - (head - tail)) : (tail - head);
                if (!nr_events)
                        break;

                /* process all outstanding events */
                while (nr_events--) {
                        hwirq = decode_msi_hwirq(msi, eq, head);
                        virq = irq_find_mapping(msi->inner_domain, hwirq);
                        generic_handle_irq(virq);

                        head++;
                        head %= EQ_LEN;
                }

                /*
                 * Now all outstanding events have been processed. Update the
                 * head pointer.
                 */
                iproc_msi_write_reg(msi, IPROC_MSI_EQ_HEAD, eq, head);

                /*
                 * Now go read the tail pointer again to see if there are new
                 * outstanding events that came in during the above window.
                 */
        } while (true);

        chained_irq_exit(chip, desc);
}

static void iproc_msi_enable(struct iproc_msi *msi)
{
        int i, eq;
        u32 val;

        /* Program memory region for each event queue */
        for (i = 0; i < msi->nr_eq_region; i++) {
                dma_addr_t addr = msi->eq_dma + (i * EQ_MEM_REGION_SIZE);

                iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE, i,
                                lower_32_bits(addr));
                iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE_UPPER, i,
                                upper_32_bits(addr));
        }

        /* Program address region for MSI posted writes */
        for (i = 0; i < msi->nr_msi_region; i++) {
                phys_addr_t addr = msi->msi_addr + (i * MSI_MEM_REGION_SIZE);

                iproc_msi_write_reg(msi, IPROC_MSI_PAGE, i,
                                lower_32_bits(addr));
                iproc_msi_write_reg(msi, IPROC_MSI_PAGE_UPPER, i,
                                upper_32_bits(addr));
        }

        for (eq = 0; eq < msi->nr_irqs; eq++) {
                /* Enable MSI event queue */
                val = IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT |
                        IPROC_MSI_EQ_EN;
                iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val);

                /*
                 * Some legacy platforms require the MSI interrupt enable
                 * register to be set explicitly.
                 */
                if (msi->has_inten_reg) {
                        val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq);
                        val |= BIT(eq);
                        iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val);
                }
        }
}

static void iproc_msi_disable(struct iproc_msi *msi)
{
        u32 eq, val;

        for (eq = 0; eq < msi->nr_irqs; eq++) {
                if (msi->has_inten_reg) {
                        val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq);
                        val &= ~BIT(eq);
                        iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val);
                }

                val = iproc_msi_read_reg(msi, IPROC_MSI_CTRL, eq);
                val &= ~(IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT |
                         IPROC_MSI_EQ_EN);
                iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val);
        }
}

static int iproc_msi_alloc_domains(struct device_node *node,
                struct iproc_msi *msi)
{
        msi->inner_domain = irq_domain_add_linear(NULL, msi->nr_msi_vecs,
                        &msi_domain_ops, msi);
        if (!msi->inner_domain)
                return -ENOMEM;

        msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
                        &iproc_msi_domain_info,
                        msi->inner_domain);
        if (!msi->msi_domain) {
                irq_domain_remove(msi->inner_domain);
                return -ENOMEM;
        }

        return 0;
}

static void iproc_msi_free_domains(struct iproc_msi *msi)
{
        if (msi->msi_domain)
                irq_domain_remove(msi->msi_domain);

        if (msi->inner_domain)
                irq_domain_remove(msi->inner_domain);
}

static void iproc_msi_irq_free(struct iproc_msi *msi, unsigned int cpu)
{
        int i;

        for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
                irq_set_chained_handler_and_data(msi->grps[i].gic_irq,
                                NULL, NULL);
        }
}

static int iproc_msi_irq_setup(struct iproc_msi *msi, unsigned int cpu)
{
        int i, ret;
        cpumask_var_t mask;
        struct iproc_pcie *pcie = msi->pcie;

        for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
                irq_set_chained_handler_and_data(msi->grps[i].gic_irq,
                                iproc_msi_handler,
                                &msi->grps[i]);
                /* Dedicate GIC interrupt to each CPU core */
                if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
                        cpumask_clear(mask);
                        cpumask_set_cpu(cpu, mask);
                        ret = irq_set_affinity(msi->grps[i].gic_irq, mask);
                        if (ret)
                                dev_err(pcie->dev,
                                        "failed to set affinity for IRQ%d\n",
                                        msi->grps[i].gic_irq);
                        free_cpumask_var(mask);
                } else {
                        dev_err(pcie->dev, "failed to alloc CPU mask\n");
                        ret = -EINVAL;
                }

                if (ret) {
                        /* Free all configured/unconfigured IRQs */
                        iproc_msi_irq_free(msi, cpu);
                        return ret;
                }
        }

        return 0;
}

int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
{
        struct iproc_msi *msi;
        int i, ret;
        unsigned int cpu;

        if (!of_device_is_compatible(node, "brcm,iproc-msi"))
                return -ENODEV;

        if (!of_find_property(node, "msi-controller", NULL))
                return -ENODEV;

        if (pcie->msi)
                return -EBUSY;

        msi = devm_kzalloc(pcie->dev, sizeof(*msi), GFP_KERNEL);
        if (!msi)
                return -ENOMEM;

        msi->pcie = pcie;
        pcie->msi = msi;
        msi->msi_addr = pcie->base_addr;
        mutex_init(&msi->bitmap_lock);
        msi->nr_cpus = num_possible_cpus();

        msi->nr_irqs = of_irq_count(node);
        if (!msi->nr_irqs) {
                dev_err(pcie->dev, "found no MSI GIC interrupt\n");
                return -ENODEV;
        }

        if (msi->nr_irqs > NR_HW_IRQS) {
                dev_warn(pcie->dev, "too many MSI GIC interrupts defined %d\n",
                         msi->nr_irqs);
                msi->nr_irqs = NR_HW_IRQS;
        }
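
        /*
         * The affinity scheme described above iproc_msi_irq_set_affinity()
         * assumes the number of GIC interrupts is a multiple of the number of
         * CPUs: error out if there are fewer interrupts than CPUs, and trim
         * nr_irqs down to a multiple of nr_cpus otherwise.
         */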
        if (msi->nr_irqs < msi->nr_cpus) {
                dev_err(pcie->dev,
                        "not enough GIC interrupts for MSI affinity\n");
                return -EINVAL;
        }

        if (msi->nr_irqs % msi->nr_cpus != 0) {
                msi->nr_irqs -= msi->nr_irqs % msi->nr_cpus;
                dev_warn(pcie->dev, "Reducing number of interrupts to %d\n",
                         msi->nr_irqs);
        }

        switch (pcie->type) {
        case IPROC_PCIE_PAXB_BCMA:
        case IPROC_PCIE_PAXB:
                msi->reg_offsets = iproc_msi_reg_paxb;
                msi->nr_eq_region = 1;
                msi->nr_msi_region = 1;
                break;
        case IPROC_PCIE_PAXC:
                msi->reg_offsets = iproc_msi_reg_paxc;
                msi->nr_eq_region = msi->nr_irqs;
                msi->nr_msi_region = msi->nr_irqs;
                break;
        default:
                dev_err(pcie->dev, "incompatible iProc PCIe interface\n");
                return -EINVAL;
        }

        if (of_find_property(node, "brcm,pcie-msi-inten", NULL))
                msi->has_inten_reg = true;

        msi->nr_msi_vecs = msi->nr_irqs * EQ_LEN;
        msi->bitmap = devm_kcalloc(pcie->dev, BITS_TO_LONGS(msi->nr_msi_vecs),
                        sizeof(*msi->bitmap), GFP_KERNEL);
        if (!msi->bitmap)
                return -ENOMEM;

        msi->grps = devm_kcalloc(pcie->dev, msi->nr_irqs, sizeof(*msi->grps),
                        GFP_KERNEL);
        if (!msi->grps)
                return -ENOMEM;

        for (i = 0; i < msi->nr_irqs; i++) {
                unsigned int irq = irq_of_parse_and_map(node, i);

                if (!irq) {
                        dev_err(pcie->dev, "unable to parse/map interrupt\n");
                        ret = -ENODEV;
                        goto free_irqs;
                }
                msi->grps[i].gic_irq = irq;
                msi->grps[i].msi = msi;
                msi->grps[i].eq = i;
        }

        /* Reserve memory for event queue and make sure memories are zeroed */
        msi->eq_cpu = dma_zalloc_coherent(pcie->dev,
                        msi->nr_eq_region * EQ_MEM_REGION_SIZE,
                        &msi->eq_dma, GFP_KERNEL);
        if (!msi->eq_cpu) {
                ret = -ENOMEM;
                goto free_irqs;
        }

        ret = iproc_msi_alloc_domains(node, msi);
        if (ret) {
                dev_err(pcie->dev, "failed to create MSI domains\n");
                goto free_eq_dma;
        }

        for_each_online_cpu(cpu) {
                ret = iproc_msi_irq_setup(msi, cpu);
                if (ret)
                        goto free_msi_irq;
        }

        iproc_msi_enable(msi);

        return 0;

free_msi_irq:
        for_each_online_cpu(cpu)
                iproc_msi_irq_free(msi, cpu);
        iproc_msi_free_domains(msi);

free_eq_dma:
        dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
                        msi->eq_cpu, msi->eq_dma);

free_irqs:
        for (i = 0; i < msi->nr_irqs; i++) {
                if (msi->grps[i].gic_irq)
                        irq_dispose_mapping(msi->grps[i].gic_irq);
        }
        pcie->msi = NULL;

        return ret;
}
EXPORT_SYMBOL(iproc_msi_init);

void iproc_msi_exit(struct iproc_pcie *pcie)
{
        struct iproc_msi *msi = pcie->msi;
        unsigned int i, cpu;

        if (!msi)
                return;

        iproc_msi_disable(msi);

        for_each_online_cpu(cpu)
                iproc_msi_irq_free(msi, cpu);

        iproc_msi_free_domains(msi);

        dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
                        msi->eq_cpu, msi->eq_dma);

        for (i = 0; i < msi->nr_irqs; i++) {
                if (msi->grps[i].gic_irq)
                        irq_dispose_mapping(msi->grps[i].gic_irq);
        }
}
EXPORT_SYMBOL(iproc_msi_exit);