/*
 * Copyright 2014-2016 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/msi.h>
#include <asm/pci-bridge.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
#include <misc/cxl.h>

#include "pci.h"

int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe;
	int rc;

	pe = pnv_ioda_get_pe(dev);
	if (!pe)
		return -ENODEV;

	pe_info(pe, "Switching PHB to CXL\n");

	rc = opal_pci_set_phb_cxl_mode(phb->opal_id, mode, pe->pe_number);
	if (rc == OPAL_UNSUPPORTED)
		dev_err(&dev->dev, "Required cxl mode not supported by firmware - update skiboot\n");
	else if (rc)
		dev_err(&dev->dev, "opal_pci_set_phb_cxl_mode failed: %i\n", rc);

	return rc;
}
EXPORT_SYMBOL(pnv_phb_to_cxl_mode);
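
/*
 * Illustrative caller sketch (an assumption about usage, not part of this
 * file): a cxl driver switching a slot into CAPI mode would pass one of the
 * OPAL_PHB_CAPI_MODE_* constants from opal-api.h, e.g.:
 *
 *	if (pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_CAPI))
 *		return -ENODEV;
 */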

/*
 * Find the PHB for the cxl dev and allocate MSI hwirqs.
 * Returns the absolute hardware IRQ number.
 */
int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	int hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, num);

	if (hwirq < 0) {
		dev_warn(&dev->dev, "Failed to find a free MSI\n");
		return -ENOSPC;
	}

	return phb->msi_base + hwirq;
}
EXPORT_SYMBOL(pnv_cxl_alloc_hwirqs);

void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;

	msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, num);
}
EXPORT_SYMBOL(pnv_cxl_release_hwirqs);
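
/*
 * Illustrative pairing (an assumption about usage): the absolute hwirq
 * returned by pnv_cxl_alloc_hwirqs() is what callers later hand back here,
 * with the same count:
 *
 *	int hwirq = pnv_cxl_alloc_hwirqs(dev, 1);
 *	if (hwirq < 0)
 *		return hwirq;
 *	...
 *	pnv_cxl_release_hwirqs(dev, hwirq, 1);
 */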

void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
				  struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	int i, hwirq;

	for (i = 1; i < CXL_IRQ_RANGES; i++) {
		if (!irqs->range[i])
			continue;
		pr_devel("cxl release irq range 0x%x: offset: 0x%lx limit: %ld\n",
			 i, irqs->offset[i], irqs->range[i]);
		hwirq = irqs->offset[i] - phb->msi_base;
		msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq,
				       irqs->range[i]);
	}
}
EXPORT_SYMBOL(pnv_cxl_release_hwirq_ranges);

int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
			       struct pci_dev *dev, int num)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	int i, hwirq, try;

	memset(irqs, 0, sizeof(struct cxl_irq_ranges));

	/* 0 is reserved for the multiplexed PSL DSI interrupt */
	for (i = 1; i < CXL_IRQ_RANGES && num; i++) {
		try = num;
		while (try) {
			hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, try);
			if (hwirq >= 0)
				break;
			try /= 2;
		}
		if (!try)
			goto fail;

		irqs->offset[i] = phb->msi_base + hwirq;
		irqs->range[i] = try;
		pr_devel("cxl alloc irq range 0x%x: offset: 0x%lx limit: %li\n",
			 i, irqs->offset[i], irqs->range[i]);
		num -= try;
	}
	if (num)
		goto fail;

	return 0;
fail:
	pnv_cxl_release_hwirq_ranges(irqs, dev);
	return -ENOSPC;
}
EXPORT_SYMBOL(pnv_cxl_alloc_hwirq_ranges);
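
/*
 * Worked example of the halving strategy above (illustrative numbers): for
 * num = 7 on a fragmented bitmap, the loop first tries to allocate a
 * contiguous run of 7 for range[1]; if that fails it retries with 3, then 1.
 * Any shortfall carries over to range[2] and range[3] until num is exhausted
 * or the CXL_IRQ_RANGES slots run out, at which point everything allocated
 * so far is released again and -ENOSPC is returned.
 */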

int pnv_cxl_get_irq_count(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;

	return phb->msi_bmp.irq_count;
}
EXPORT_SYMBOL(pnv_cxl_get_irq_count);

int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
			   unsigned int virq)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	unsigned int xive_num = hwirq - phb->msi_base;
	struct pnv_ioda_pe *pe;
	int rc;

	pe = pnv_ioda_get_pe(dev);
	if (!pe)
		return -ENODEV;

	/* Assign XIVE to PE */
	rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
	if (rc) {
		pe_warn(pe, "%s: OPAL error %d setting msi_base 0x%x "
			"hwirq 0x%x XIVE 0x%x PE\n",
			pci_name(dev), rc, phb->msi_base, hwirq, xive_num);
		return -EIO;
	}
	pnv_set_msi_irq_chip(phb, virq);

	return 0;
}
EXPORT_SYMBOL(pnv_cxl_ioda_msi_setup);
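
/*
 * Note: pnv_cxl_ioda_msi_setup() is also used by pnv_cxl_cx4_setup_msi_irqs()
 * below to bind each hwirq to the device's PE; the XIVE number programmed
 * into OPAL is simply the hwirq's offset from the PHB's msi_base.
 */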

#if IS_MODULE(CONFIG_CXL)
static inline int get_cxl_module(void)
{
	struct module *cxl_module;

	mutex_lock(&module_mutex);

	cxl_module = find_module("cxl");
	if (cxl_module)
		__module_get(cxl_module);

	mutex_unlock(&module_mutex);

	if (!cxl_module)
		return -ENODEV;

	return 0;
}
#else
static inline int get_cxl_module(void) { return 0; }
#endif

/*
 * Sets flags and switches the controller ops to enable the cxl kernel api.
 * Originally the cxl kernel API operated on a virtual PHB, but certain cards
 * such as the Mellanox CX4 use a peer model instead and for these cards the
 * cxl kernel api will operate on the real PHB.
 */
int pnv_cxl_enable_phb_kernel_api(struct pci_controller *hose, bool enable)
{
	struct pnv_phb *phb = hose->private_data;
	int rc;

	if (!enable) {
		/*
		 * Once cxl mode is enabled on the PHB, there is currently no
		 * known safe method to disable it again, and trying risks a
		 * checkstop. If we can find a way to safely disable cxl mode
		 * in the future we can revisit this, but for now the only
		 * sane thing to do is to refuse to disable cxl mode:
		 */
		return -EPERM;
	}

	/*
	 * Hold a reference to the cxl module since several PHB operations now
	 * depend on it, and it would be insane to allow it to be removed so
	 * long as we are in this mode (and since we can't safely disable this
	 * mode once enabled...).
	 */
	rc = get_cxl_module();
	if (rc)
		return rc;

	phb->flags |= PNV_PHB_FLAG_CXL;
	hose->controller_ops = pnv_cxl_cx4_ioda_controller_ops;

	return 0;
}
EXPORT_SYMBOL_GPL(pnv_cxl_enable_phb_kernel_api);
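
/*
 * Illustrative caller sketch (an assumption about usage): a driver for a
 * peer-model card would enable the API once at probe time and treat the
 * switch as permanent, since disabling is refused above:
 *
 *	rc = pnv_cxl_enable_phb_kernel_api(hose, true);
 *	if (rc)
 *		return rc;
 */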

bool pnv_pci_on_cxl_phb(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;

	return !!(phb->flags & PNV_PHB_FLAG_CXL);
}
EXPORT_SYMBOL_GPL(pnv_pci_on_cxl_phb);

struct cxl_afu *pnv_cxl_phb_to_afu(struct pci_controller *hose)
{
	struct pnv_phb *phb = hose->private_data;

	return (struct cxl_afu *)phb->cxl_afu;
}
EXPORT_SYMBOL_GPL(pnv_cxl_phb_to_afu);

void pnv_cxl_phb_set_peer_afu(struct pci_dev *dev, struct cxl_afu *afu)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;

	phb->cxl_afu = afu;
}
EXPORT_SYMBOL_GPL(pnv_cxl_phb_set_peer_afu);
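
/*
 * pnv_cxl_phb_to_afu() and pnv_cxl_phb_set_peer_afu() form a getter/setter
 * pair around phb->cxl_afu; the enable/disable device hooks below rely on
 * the peer AFU having been set before any function > 0 is enabled.
 */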

/*
 * In the peer cxl model, the XSL/PSL is physical function 0, and will be used
 * by other functions on the device for memory access and interrupts. When the
 * other functions are enabled we explicitly take a reference on the cxl
 * function since they will use it, and allocate a default context associated
 * with that function just like the vPHB model of the cxl kernel API.
 */
bool pnv_cxl_enable_device_hook(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct cxl_afu *afu = phb->cxl_afu;

	if (!pnv_pci_enable_device_hook(dev))
		return false;

	/* No special handling for the cxl function, which is always PF 0 */
	if (PCI_FUNC(dev->devfn) == 0)
		return true;

	if (!afu) {
		dev_WARN(&dev->dev, "Attempted to enable function > 0 on CXL PHB without a peer AFU\n");
		return false;
	}

	dev_info(&dev->dev, "Enabling function on CXL enabled PHB with peer AFU\n");

	/* Make sure the peer AFU can't go away while this device is active */
	cxl_afu_get(afu);

	return cxl_pci_associate_default_context(dev, afu);
}

void pnv_cxl_disable_device(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct cxl_afu *afu = phb->cxl_afu;

	/* No special handling for the cxl function, which is always PF 0 */
	if (PCI_FUNC(dev->devfn) == 0)
		return;

	cxl_pci_disable_device(dev);
	cxl_afu_put(afu);
}

/*
 * This is a special version of pnv_setup_msi_irqs for cards in cxl mode. This
 * function handles setting up the IVTE entries for the XSL to use.
 *
 * We are currently not filling out the MSIX table, since the only currently
 * supported adapter (CX4) uses a custom MSIX table format in cxl mode and it
 * is up to their driver to fill that out. In the future we may fill out the
 * MSIX table (and change the IVTE entries to be an index to the MSIX table)
 * for adapters implementing the Full MSI-X mode described in the CAIA.
 */
int pnv_cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	struct cxl_context *ctx = NULL;
	unsigned int virq;
	int hwirq;
	int afu_irq = 0;
	int rc;

	if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
		return -ENODEV;

	if (pdev->no_64bit_msi && !phb->msi32_support)
		return -ENODEV;

	rc = cxl_cx4_setup_msi_irqs(pdev, nvec, type);
	if (rc)
		return rc;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
			pr_warn("%s: Supports only 64-bit MSIs\n",
				pci_name(pdev));
			return -ENXIO;
		}

		hwirq = cxl_next_msi_hwirq(pdev, &ctx, &afu_irq);
		if (WARN_ON(hwirq <= 0))
			return (hwirq ? hwirq : -ENOMEM);

		virq = irq_create_mapping(NULL, hwirq);
		if (!virq) {
			pr_warn("%s: Failed to map cxl mode MSI to linux irq\n",
				pci_name(pdev));
			return -ENOMEM;
		}

		rc = pnv_cxl_ioda_msi_setup(pdev, hwirq, virq);
		if (rc) {
			pr_warn("%s: Failed to setup cxl mode MSI\n",
				pci_name(pdev));
			irq_dispose_mapping(virq);
			return rc;
		}

		irq_set_msi_desc(virq, entry);
	}

	return 0;
}
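
/*
 * The mappings created above are undone by pnv_cxl_cx4_teardown_msi_irqs()
 * below: each virq is detached from its msi_desc and disposed, then the cxl
 * driver releases the underlying AFU interrupts via
 * cxl_cx4_teardown_msi_irqs().
 */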

void pnv_cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;

	if (WARN_ON(!phb))
		return;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->irq)
			continue;
		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
	}

	cxl_cx4_teardown_msi_irqs(pdev);
}