irq-gic-v2m.c 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509
  1. /*
  2. * ARM GIC v2m MSI(-X) support
  3. * Support for Message Signaled Interrupts for systems that
  4. * implement ARM Generic Interrupt Controller: GICv2m.
  5. *
  6. * Copyright (C) 2014 Advanced Micro Devices, Inc.
  7. * Authors: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
  8. * Harish Kasiviswanathan <harish.kasiviswanathan@amd.com>
  9. * Brandon Anderson <brandon.anderson@amd.com>
  10. *
  11. * This program is free software; you can redistribute it and/or modify it
  12. * under the terms of the GNU General Public License version 2 as published
  13. * by the Free Software Foundation.
  14. */
  15. #define pr_fmt(fmt) "GICv2m: " fmt
  16. #include <linux/acpi.h>
  17. #include <linux/irq.h>
  18. #include <linux/irqdomain.h>
  19. #include <linux/kernel.h>
  20. #include <linux/msi.h>
  21. #include <linux/of_address.h>
  22. #include <linux/of_pci.h>
  23. #include <linux/slab.h>
  24. #include <linux/spinlock.h>
  25. /*
  26. * MSI_TYPER:
  27. * [31:26] Reserved
  28. * [25:16] lowest SPI assigned to MSI
  29. * [15:10] Reserved
* [9:0] Number of SPIs assigned to MSI
  31. */
  32. #define V2M_MSI_TYPER 0x008
  33. #define V2M_MSI_TYPER_BASE_SHIFT 16
  34. #define V2M_MSI_TYPER_BASE_MASK 0x3FF
  35. #define V2M_MSI_TYPER_NUM_MASK 0x3FF
  36. #define V2M_MSI_SETSPI_NS 0x040
  37. #define V2M_MIN_SPI 32
  38. #define V2M_MAX_SPI 1019
  39. #define V2M_MSI_IIDR 0xFCC
  40. #define V2M_MSI_TYPER_BASE_SPI(x) \
  41. (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK)
  42. #define V2M_MSI_TYPER_NUM_SPI(x) ((x) & V2M_MSI_TYPER_NUM_MASK)
  43. /* APM X-Gene with GICv2m MSI_IIDR register value */
  44. #define XGENE_GICV2M_MSI_IIDR 0x06000170
  45. /* List of flags for specific v2m implementation */
  46. #define GICV2M_NEEDS_SPI_OFFSET 0x00000001
  47. static LIST_HEAD(v2m_nodes);
  48. static DEFINE_SPINLOCK(v2m_lock);
/*
 * Per-frame state for one GICv2m MSI frame.  All discovered frames are
 * linked on the global v2m_nodes list; the allocation bitmap is
 * protected by the global v2m_lock.
 */
struct v2m_data {
	struct list_head entry;		/* node on the global v2m_nodes list */
	struct fwnode_handle *fwnode;	/* DT node or ACPI-allocated token */
	struct resource res;		/* GICv2m resource */
	void __iomem *base;		/* GICv2m virt address */
	u32 spi_start;			/* The SPI number that MSIs start */
	u32 nr_spis;			/* The number of SPIs for MSIs */
	unsigned long *bm;		/* MSI vector bitmap */
	u32 flags;			/* v2m flags for specific implementation */
};
/* Mask an MSI: set the PCI MSI mask bit, then mask the parent GIC SPI. */
static void gicv2m_mask_msi_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}
/* Unmask an MSI: clear the PCI MSI mask bit, then unmask the parent SPI. */
static void gicv2m_unmask_msi_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}
/*
 * irq_chip presented to the PCI/MSI layer: mask/unmask combine the PCI
 * MSI mask bit with the parent SPI mask; EOI and message writes use the
 * generic helpers.
 */
static struct irq_chip gicv2m_msi_irq_chip = {
	.name			= "MSI",
	.irq_mask		= gicv2m_mask_msi_irq,
	.irq_unmask		= gicv2m_unmask_msi_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_write_msi_msg	= pci_msi_domain_write_msg,
};
/* PCI MSI domain info: default ops/chip callbacks, MSI-X capable. */
static struct msi_domain_info gicv2m_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX),
	.chip	= &gicv2m_msi_irq_chip,
};
  81. static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
  82. {
  83. struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
  84. phys_addr_t addr = v2m->res.start + V2M_MSI_SETSPI_NS;
  85. msg->address_hi = upper_32_bits(addr);
  86. msg->address_lo = lower_32_bits(addr);
  87. msg->data = data->hwirq;
  88. if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
  89. msg->data -= v2m->spi_start;
  90. }
/*
 * irq_chip for the v2m nexus domain: everything except MSI message
 * composition is delegated straight to the parent GIC.
 */
static struct irq_chip gicv2m_irq_chip = {
	.name			= "GICv2m",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_compose_msi_msg	= gicv2m_compose_msi_msg,
};
/*
 * Allocate @virq in the parent GIC domain for SPI @hwirq.
 *
 * The parent fwspec layout depends on the firmware that described the
 * GIC: a three-cell DT spec (cell 0 = 0 for SPI, cell 1 relative to the
 * SPI base of 32) or a two-cell spec for ACPI-created irqchip fwnodes.
 * Returns 0 on success or a negative errno.
 */
static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain,
				       unsigned int virq,
				       irq_hw_number_t hwirq)
{
	struct irq_fwspec fwspec;
	struct irq_data *d;
	int err;

	if (is_of_node(domain->parent->fwnode)) {
		fwspec.fwnode = domain->parent->fwnode;
		fwspec.param_count = 3;
		fwspec.param[0] = 0;			/* 0 == SPI */
		fwspec.param[1] = hwirq - 32;		/* DT SPIs are 0-based */
		fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
	} else if (is_fwnode_irqchip(domain->parent->fwnode)) {
		fwspec.fwnode = domain->parent->fwnode;
		fwspec.param_count = 2;
		fwspec.param[0] = hwirq;
		fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
	} else {
		return -EINVAL;
	}

	err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
	if (err)
		return err;

	/* Configure the interrupt line to be edge */
	d = irq_domain_get_irq_data(domain->parent, virq);
	d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
	return 0;
}
  128. static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq)
  129. {
  130. int pos;
  131. pos = hwirq - v2m->spi_start;
  132. if (pos < 0 || pos >= v2m->nr_spis) {
  133. pr_err("Failed to teardown msi. Invalid hwirq %d\n", hwirq);
  134. return;
  135. }
  136. spin_lock(&v2m_lock);
  137. __clear_bit(pos, v2m->bm);
  138. spin_unlock(&v2m_lock);
  139. }
  140. static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
  141. unsigned int nr_irqs, void *args)
  142. {
  143. struct v2m_data *v2m = NULL, *tmp;
  144. int hwirq, offset, err = 0;
  145. spin_lock(&v2m_lock);
  146. list_for_each_entry(tmp, &v2m_nodes, entry) {
  147. offset = find_first_zero_bit(tmp->bm, tmp->nr_spis);
  148. if (offset < tmp->nr_spis) {
  149. __set_bit(offset, tmp->bm);
  150. v2m = tmp;
  151. break;
  152. }
  153. }
  154. spin_unlock(&v2m_lock);
  155. if (!v2m)
  156. return -ENOSPC;
  157. hwirq = v2m->spi_start + offset;
  158. err = gicv2m_irq_gic_domain_alloc(domain, virq, hwirq);
  159. if (err) {
  160. gicv2m_unalloc_msi(v2m, hwirq);
  161. return err;
  162. }
  163. irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
  164. &gicv2m_irq_chip, v2m);
  165. return 0;
  166. }
/*
 * .free callback: return the SPI to the owning frame's bitmap and free
 * the parent GIC interrupt.  Only single-interrupt allocations exist
 * (the alloc path always allocates one), hence the BUG_ON.
 */
static void gicv2m_irq_domain_free(struct irq_domain *domain,
				   unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct v2m_data *v2m = irq_data_get_irq_chip_data(d);

	BUG_ON(nr_irqs != 1);
	gicv2m_unalloc_msi(v2m, d->hwirq);
	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
/* Nexus domain ops: allocation picks a free SPI from any frame. */
static const struct irq_domain_ops gicv2m_domain_ops = {
	.alloc			= gicv2m_irq_domain_alloc,
	.free			= gicv2m_irq_domain_free,
};
  180. static bool is_msi_spi_valid(u32 base, u32 num)
  181. {
  182. if (base < V2M_MIN_SPI) {
  183. pr_err("Invalid MSI base SPI (base:%u)\n", base);
  184. return false;
  185. }
  186. if ((num == 0) || (base + num > V2M_MAX_SPI)) {
  187. pr_err("Number of SPIs (%u) exceed maximum (%u)\n",
  188. num, V2M_MAX_SPI - V2M_MIN_SPI + 1);
  189. return false;
  190. }
  191. return true;
  192. }
/* Minimal chip for platform (non-PCI) MSI users. */
static struct irq_chip gicv2m_pmsi_irq_chip = {
	.name		= "pMSI",
};

/* No custom ops: rely entirely on the generic MSI defaults. */
static struct msi_domain_ops gicv2m_pmsi_ops = {
};

/* Platform MSI domain info mirroring the PCI one, minus MSI-X. */
static struct msi_domain_info gicv2m_pmsi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
	.ops	= &gicv2m_pmsi_ops,
	.chip	= &gicv2m_pmsi_irq_chip,
};
/*
 * Undo gicv2m_init_one() for every registered frame: unlink it, free
 * the vector bitmap, unmap the frame and release the firmware node —
 * of_node_put() for DT nodes (a no-op for non-OF fwnodes), and
 * irq_domain_free_fwnode() for ACPI-allocated irqchip tokens.  Only
 * called on init failure paths, before interrupts can be in use.
 */
static void gicv2m_teardown(void)
{
	struct v2m_data *v2m, *tmp;

	list_for_each_entry_safe(v2m, tmp, &v2m_nodes, entry) {
		list_del(&v2m->entry);
		kfree(v2m->bm);
		iounmap(v2m->base);
		of_node_put(to_of_node(v2m->fwnode));
		if (is_fwnode_irqchip(v2m->fwnode))
			irq_domain_free_fwnode(v2m->fwnode);
		kfree(v2m);
	}
}
/*
 * Build the irq domain stack on top of @parent (the GIC domain): one
 * nexus domain implementing SPI allocation, with PCI-MSI and platform-
 * MSI domains stacked on it.  A single nexus domain serves all frames;
 * the allocator walks the whole v2m_nodes list.  Returns 0 when no
 * frame was discovered, -ENOMEM when any domain creation fails (all
 * partially created domains are removed).
 */
static int gicv2m_allocate_domains(struct irq_domain *parent)
{
	struct irq_domain *inner_domain, *pci_domain, *plat_domain;
	struct v2m_data *v2m;

	v2m = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
	if (!v2m)
		return 0;

	inner_domain = irq_domain_create_tree(v2m->fwnode,
					      &gicv2m_domain_ops, v2m);
	if (!inner_domain) {
		pr_err("Failed to create GICv2m domain\n");
		return -ENOMEM;
	}

	inner_domain->bus_token = DOMAIN_BUS_NEXUS;
	inner_domain->parent = parent;
	pci_domain = pci_msi_create_irq_domain(v2m->fwnode,
					       &gicv2m_msi_domain_info,
					       inner_domain);
	plat_domain = platform_msi_create_irq_domain(v2m->fwnode,
						     &gicv2m_pmsi_domain_info,
						     inner_domain);
	if (!pci_domain || !plat_domain) {
		pr_err("Failed to create MSI domains\n");
		if (plat_domain)
			irq_domain_remove(plat_domain);
		if (pci_domain)
			irq_domain_remove(pci_domain);
		irq_domain_remove(inner_domain);
		return -ENOMEM;
	}

	return 0;
}
  248. static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
  249. u32 spi_start, u32 nr_spis,
  250. struct resource *res)
  251. {
  252. int ret;
  253. struct v2m_data *v2m;
  254. v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL);
  255. if (!v2m) {
  256. pr_err("Failed to allocate struct v2m_data.\n");
  257. return -ENOMEM;
  258. }
  259. INIT_LIST_HEAD(&v2m->entry);
  260. v2m->fwnode = fwnode;
  261. memcpy(&v2m->res, res, sizeof(struct resource));
  262. v2m->base = ioremap(v2m->res.start, resource_size(&v2m->res));
  263. if (!v2m->base) {
  264. pr_err("Failed to map GICv2m resource\n");
  265. ret = -ENOMEM;
  266. goto err_free_v2m;
  267. }
  268. if (spi_start && nr_spis) {
  269. v2m->spi_start = spi_start;
  270. v2m->nr_spis = nr_spis;
  271. } else {
  272. u32 typer = readl_relaxed(v2m->base + V2M_MSI_TYPER);
  273. v2m->spi_start = V2M_MSI_TYPER_BASE_SPI(typer);
  274. v2m->nr_spis = V2M_MSI_TYPER_NUM_SPI(typer);
  275. }
  276. if (!is_msi_spi_valid(v2m->spi_start, v2m->nr_spis)) {
  277. ret = -EINVAL;
  278. goto err_iounmap;
  279. }
  280. /*
  281. * APM X-Gene GICv2m implementation has an erratum where
  282. * the MSI data needs to be the offset from the spi_start
  283. * in order to trigger the correct MSI interrupt. This is
  284. * different from the standard GICv2m implementation where
  285. * the MSI data is the absolute value within the range from
  286. * spi_start to (spi_start + num_spis).
  287. */
  288. if (readl_relaxed(v2m->base + V2M_MSI_IIDR) == XGENE_GICV2M_MSI_IIDR)
  289. v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
  290. v2m->bm = kzalloc(sizeof(long) * BITS_TO_LONGS(v2m->nr_spis),
  291. GFP_KERNEL);
  292. if (!v2m->bm) {
  293. ret = -ENOMEM;
  294. goto err_iounmap;
  295. }
  296. list_add_tail(&v2m->entry, &v2m_nodes);
  297. pr_info("range%pR, SPI[%d:%d]\n", res,
  298. v2m->spi_start, (v2m->spi_start + v2m->nr_spis - 1));
  299. return 0;
  300. err_iounmap:
  301. iounmap(v2m->base);
  302. err_free_v2m:
  303. kfree(v2m);
  304. return ret;
  305. }
  306. static struct of_device_id gicv2m_device_id[] = {
  307. { .compatible = "arm,gic-v2m-frame", },
  308. {},
  309. };
/*
 * Discover GICv2m frames below the GIC DT node @parent_handle.
 *
 * Each child matching "arm,gic-v2m-frame" that carries an
 * "msi-controller" property is registered via gicv2m_init_one();
 * optional "arm,msi-base-spi" / "arm,msi-num-spis" properties override
 * the frame's MSI_TYPER values.  On any failure, all previously
 * registered frames are torn down and the error is returned.
 */
static int __init gicv2m_of_init(struct fwnode_handle *parent_handle,
				 struct irq_domain *parent)
{
	int ret = 0;
	struct device_node *node = to_of_node(parent_handle);
	struct device_node *child;

	/* of_find_matching_node() drops the previous node's reference */
	for (child = of_find_matching_node(node, gicv2m_device_id); child;
	     child = of_find_matching_node(child, gicv2m_device_id)) {
		u32 spi_start = 0, nr_spis = 0;
		struct resource res;

		if (!of_find_property(child, "msi-controller", NULL))
			continue;

		ret = of_address_to_resource(child, 0, &res);
		if (ret) {
			pr_err("Failed to allocate v2m resource.\n");
			break;
		}

		if (!of_property_read_u32(child, "arm,msi-base-spi",
					  &spi_start) &&
		    !of_property_read_u32(child, "arm,msi-num-spis", &nr_spis))
			pr_info("DT overriding V2M MSI_TYPER (base:%u, num:%u)\n",
				spi_start, nr_spis);

		ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis, &res);
		if (ret) {
			/* Breaking out early: release the held child ref */
			of_node_put(child);
			break;
		}
	}

	if (!ret)
		ret = gicv2m_allocate_domains(parent);
	if (ret)
		gicv2m_teardown();
	return ret;
}
  344. #ifdef CONFIG_ACPI
  345. static int acpi_num_msi;
  346. static struct fwnode_handle *gicv2m_get_fwnode(struct device *dev)
  347. {
  348. struct v2m_data *data;
  349. if (WARN_ON(acpi_num_msi <= 0))
  350. return NULL;
  351. /* We only return the fwnode of the first MSI frame. */
  352. data = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
  353. if (!data)
  354. return NULL;
  355. return data->fwnode;
  356. }
/*
 * MADT GENERIC_MSI_FRAME parser: register one GICv2m frame described by
 * ACPI.  The frame occupies a 4K MMIO region at base_address; the SPI
 * base/count come from the MADT entry only when the
 * ACPI_MADT_OVERRIDE_SPI_VALUES flag is set, otherwise they are read
 * from the frame's MSI_TYPER register by gicv2m_init_one().
 */
static int __init
acpi_parse_madt_msi(struct acpi_subtable_header *header,
		    const unsigned long end)
{
	int ret;
	struct resource res;
	u32 spi_start = 0, nr_spis = 0;
	struct acpi_madt_generic_msi_frame *m;
	struct fwnode_handle *fwnode;

	m = (struct acpi_madt_generic_msi_frame *)header;
	if (BAD_MADT_ENTRY(m, end))
		return -EINVAL;

	res.start = m->base_address;
	res.end = m->base_address + SZ_4K - 1;
	res.flags = IORESOURCE_MEM;

	if (m->flags & ACPI_MADT_OVERRIDE_SPI_VALUES) {
		spi_start = m->spi_base;
		nr_spis = m->spi_count;

		pr_info("ACPI overriding V2M MSI_TYPER (base:%u, num:%u)\n",
			spi_start, nr_spis);
	}

	/* ACPI provides no fwnode; fabricate one keyed on the frame address */
	fwnode = irq_domain_alloc_fwnode((void *)m->base_address);
	if (!fwnode) {
		pr_err("Unable to allocate GICv2m domain token\n");
		return -EINVAL;
	}

	ret = gicv2m_init_one(fwnode, spi_start, nr_spis, &res);
	if (ret)
		irq_domain_free_fwnode(fwnode);

	return ret;
}
/*
 * ACPI probe path: walk the MADT for GENERIC_MSI_FRAME entries and
 * build the MSI domains on top of @parent.  Idempotent — a second call
 * returns 0 without re-parsing.  Note: any failure is reported as
 * -EINVAL, even when gicv2m_allocate_domains() returned a different
 * error code.
 */
static int __init gicv2m_acpi_init(struct irq_domain *parent)
{
	int ret;

	if (acpi_num_msi > 0)
		return 0;

	acpi_num_msi = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_MSI_FRAME,
					     acpi_parse_madt_msi, 0);
	if (acpi_num_msi <= 0)
		goto err_out;

	ret = gicv2m_allocate_domains(parent);
	if (ret)
		goto err_out;

	pci_msi_register_fwnode_provider(&gicv2m_get_fwnode);
	return 0;

err_out:
	gicv2m_teardown();
	return -EINVAL;
}
  406. #else /* CONFIG_ACPI */
/* ACPI support compiled out: always fail so gicv2m_init() errors out. */
static int __init gicv2m_acpi_init(struct irq_domain *parent)
{
	return -EINVAL;
}
  411. #endif /* CONFIG_ACPI */
  412. int __init gicv2m_init(struct fwnode_handle *parent_handle,
  413. struct irq_domain *parent)
  414. {
  415. if (is_of_node(parent_handle))
  416. return gicv2m_of_init(parent_handle, parent);
  417. return gicv2m_acpi_init(parent);
  418. }