/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/iommu.h>
#include <linux/sched/mm.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/firmware.h>
#include <asm/eeh_event.h>
#include <asm/eeh.h>

#include "powernv.h"
#include "pci.h"
static DEFINE_MUTEX(p2p_mutex);
static DEFINE_MUTEX(tunnel_mutex);
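
/*
 * Resolve the OPAL slot id for a device node: take the bdfn from the node's
 * "reg" property and combine it with the "ibm,opal-phbid" of the enclosing
 * ibm,ioda2-phb parent.
 */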
int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id)
{
	struct device_node *parent = np;
	u32 bdfn;
	u64 phbid;
	int ret;

	ret = of_property_read_u32(np, "reg", &bdfn);
	if (ret)
		return -ENXIO;

	bdfn = ((bdfn & 0x00ffff00) >> 8);
	while ((parent = of_get_parent(parent))) {
		if (!PCI_DN(parent)) {
			of_node_put(parent);
			break;
		}

		if (!of_device_is_compatible(parent, "ibm,ioda2-phb")) {
			of_node_put(parent);
			continue;
		}

		ret = of_property_read_u64(parent, "ibm,opal-phbid", &phbid);
		if (ret) {
			of_node_put(parent);
			return -ENXIO;
		}

		*id = PCI_SLOT_ID(phbid, bdfn);
		return 0;
	}

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_slot_id);
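
/*
 * Thin wrappers around the corresponding OPAL tokens: fetch a device
 * sub-tree for a phandle, and read a slot's presence and power state.
 */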
int pnv_pci_get_device_tree(uint32_t phandle, void *buf, uint64_t len)
{
	int64_t rc;

	if (!opal_check_token(OPAL_GET_DEVICE_TREE))
		return -ENXIO;

	rc = opal_get_device_tree(phandle, (uint64_t)buf, len);
	if (rc < OPAL_SUCCESS)
		return -EIO;

	return rc;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_device_tree);

int pnv_pci_get_presence_state(uint64_t id, uint8_t *state)
{
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_GET_PRESENCE_STATE))
		return -ENXIO;

	rc = opal_pci_get_presence_state(id, (uint64_t)state);
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_presence_state);

int pnv_pci_get_power_state(uint64_t id, uint8_t *state)
{
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_GET_POWER_STATE))
		return -ENXIO;

	rc = opal_pci_get_power_state(id, (uint64_t)state);
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_power_state);
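
/*
 * Set the slot power state via OPAL. The call may complete asynchronously;
 * in that case wait for the async response and, if the caller passed a msg
 * buffer, copy the completion message into it and return 1.
 */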
int pnv_pci_set_power_state(uint64_t id, uint8_t state, struct opal_msg *msg)
{
	struct opal_msg m;
	int token, ret;
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_SET_POWER_STATE))
		return -ENXIO;

	token = opal_async_get_token_interruptible();
	if (unlikely(token < 0))
		return token;

	rc = opal_pci_set_power_state(token, id, (uint64_t)&state);
	if (rc == OPAL_SUCCESS) {
		ret = 0;
		goto exit;
	} else if (rc != OPAL_ASYNC_COMPLETION) {
		ret = -EIO;
		goto exit;
	}

	ret = opal_async_wait_response(token, &m);
	if (ret < 0)
		goto exit;

	if (msg) {
		ret = 1;
		memcpy(msg, &m, sizeof(m));
	}

exit:
	opal_async_release_token(token);
	return ret;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_power_state);

#ifdef CONFIG_PCI_MSI
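/*
 * Allocate hardware MSIs from the PHB's MSI bitmap, map each one to a Linux
 * irq and program the message into the device via the PHB's msi_setup hook.
 */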
int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	struct msi_msg msg;
	int hwirq;
	unsigned int virq;
	int rc;

	if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
		return -ENODEV;

	if (pdev->no_64bit_msi && !phb->msi32_support)
		return -ENODEV;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
			pr_warn("%s: Supports only 64-bit MSIs\n",
				pci_name(pdev));
			return -ENXIO;
		}
		hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1);
		if (hwirq < 0) {
			pr_warn("%s: Failed to find a free MSI\n",
				pci_name(pdev));
			return -ENOSPC;
		}
		virq = irq_create_mapping(NULL, phb->msi_base + hwirq);
		if (!virq) {
			pr_warn("%s: Failed to map MSI to linux irq\n",
				pci_name(pdev));
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return -ENOMEM;
		}
		rc = phb->msi_setup(phb, pdev, phb->msi_base + hwirq,
				    virq, entry->msi_attrib.is_64, &msg);
		if (rc) {
			pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
			irq_dispose_mapping(virq);
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return rc;
		}
		irq_set_msi_desc(virq, entry);
		pci_write_msi_msg(virq, &msg);
	}
	return 0;
}
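
/* Undo pnv_setup_msi_irqs(): unmap each MSI and return it to the bitmap. */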
void pnv_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	irq_hw_number_t hwirq;

	if (WARN_ON(!phb))
		return;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->irq)
			continue;

		hwirq = virq_to_hw(entry->irq);
		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
		msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
	}
}
#endif /* CONFIG_PCI_MSI */

/* Nicely print the contents of the PE State Tables (PEST). */
static void pnv_pci_dump_pest(__be64 pestA[], __be64 pestB[], int pest_size)
{
	__be64 prevA = ULONG_MAX, prevB = ULONG_MAX;
	bool dup = false;
	int i;

	for (i = 0; i < pest_size; i++) {
		__be64 peA = be64_to_cpu(pestA[i]);
		__be64 peB = be64_to_cpu(pestB[i]);

		if (peA != prevA || peB != prevB) {
			if (dup) {
				pr_info("PE[..%03x] A/B: as above\n", i-1);
				dup = false;
			}
			prevA = peA;
			prevB = peB;
			if (peA & PNV_IODA_STOPPED_STATE ||
			    peB & PNV_IODA_STOPPED_STATE)
				pr_info("PE[%03x] A/B: %016llx %016llx\n",
					i, peA, peB);
		} else if (!dup && (peA & PNV_IODA_STOPPED_STATE ||
				    peB & PNV_IODA_STOPPED_STATE)) {
			dup = true;
		}
	}
}

static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
					 struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoP7IOCPhbErrorData *data;

	data = (struct OpalIoP7IOCPhbErrorData *)common;
	pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));

	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts: %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->p7iocPlssr || data->p7iocCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->p7iocPlssr),
			be64_to_cpu(data->p7iocCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_P7IOC_NUM_PEST_REGS);
}

static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
					struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoPhb3ErrorData *data;

	data = (struct OpalIoPhb3ErrorData *)common;
	pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));
	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts: %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->nFir)
		pr_info("nFir: %016llx %016llx %016llx\n",
			be64_to_cpu(data->nFir),
			be64_to_cpu(data->nFirMask),
			be64_to_cpu(data->nFirWOF));
	if (data->phbPlssr || data->phbCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->phbPlssr),
			be64_to_cpu(data->phbCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB3_NUM_PEST_REGS);
}

static void pnv_pci_dump_phb4_diag_data(struct pci_controller *hose,
					struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoPhb4ErrorData *data;

	data = (struct OpalIoPhb4ErrorData *)common;
	pr_info("PHB4 PHB#%d Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));
	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId)
		pr_info("sourceId: %08x\n", be32_to_cpu(data->sourceId));
	if (data->nFir)
		pr_info("nFir: %016llx %016llx %016llx\n",
			be64_to_cpu(data->nFir),
			be64_to_cpu(data->nFirMask),
			be64_to_cpu(data->nFirWOF));
	if (data->phbPlssr || data->phbCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->phbPlssr),
			be64_to_cpu(data->phbCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->phbTxeErrorStatus)
		pr_info("PhbTxeErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbTxeErrorStatus),
			be64_to_cpu(data->phbTxeFirstErrorStatus),
			be64_to_cpu(data->phbTxeErrorLog0),
			be64_to_cpu(data->phbTxeErrorLog1));
	if (data->phbRxeArbErrorStatus)
		pr_info("RxeArbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRxeArbErrorStatus),
			be64_to_cpu(data->phbRxeArbFirstErrorStatus),
			be64_to_cpu(data->phbRxeArbErrorLog0),
			be64_to_cpu(data->phbRxeArbErrorLog1));
	if (data->phbRxeMrgErrorStatus)
		pr_info("RxeMrgErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRxeMrgErrorStatus),
			be64_to_cpu(data->phbRxeMrgFirstErrorStatus),
			be64_to_cpu(data->phbRxeMrgErrorLog0),
			be64_to_cpu(data->phbRxeMrgErrorLog1));
	if (data->phbRxeTceErrorStatus)
		pr_info("RxeTceErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRxeTceErrorStatus),
			be64_to_cpu(data->phbRxeTceFirstErrorStatus),
			be64_to_cpu(data->phbRxeTceErrorLog0),
			be64_to_cpu(data->phbRxeTceErrorLog1));
	if (data->phbPblErrorStatus)
		pr_info("PblErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbPblErrorStatus),
			be64_to_cpu(data->phbPblFirstErrorStatus),
			be64_to_cpu(data->phbPblErrorLog0),
			be64_to_cpu(data->phbPblErrorLog1));
	if (data->phbPcieDlpErrorStatus)
		pr_info("PcieDlp: %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbPcieDlpErrorLog1),
			be64_to_cpu(data->phbPcieDlpErrorLog2),
			be64_to_cpu(data->phbPcieDlpErrorStatus));
	if (data->phbRegbErrorStatus)
		pr_info("RegbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRegbErrorStatus),
			be64_to_cpu(data->phbRegbFirstErrorStatus),
			be64_to_cpu(data->phbRegbErrorLog0),
			be64_to_cpu(data->phbRegbErrorLog1));

	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB4_NUM_PEST_REGS);
}
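
/* Dispatch on the ioType in the common header to the matching per-chip dumper. */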
void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
				unsigned char *log_buff)
{
	struct OpalIoPhbErrorCommon *common;

	if (!hose || !log_buff)
		return;

	common = (struct OpalIoPhbErrorCommon *)log_buff;
	switch (be32_to_cpu(common->ioType)) {
	case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
		pnv_pci_dump_p7ioc_diag_data(hose, common);
		break;
	case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
		pnv_pci_dump_phb3_diag_data(hose, common);
		break;
	case OPAL_PHB_ERROR_DATA_TYPE_PHB4:
		pnv_pci_dump_phb4_diag_data(hose, common);
		break;
	default:
		pr_warn("%s: Unrecognized ioType %d\n",
			__func__, be32_to_cpu(common->ioType));
	}
}
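
/*
 * Fetch the PHB diag-data and try to clear the frozen state of the PE,
 * either through the PHB's unfreeze_pe hook (compound PEs) or directly
 * via OPAL.
 */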
static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
{
	unsigned long flags, rc;
	int has_diag, ret = 0;

	spin_lock_irqsave(&phb->lock, flags);

	/* Fetch PHB diag-data */
	rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data,
					 phb->diag_data_size);
	has_diag = (rc == OPAL_SUCCESS);

	/* If the PHB supports compound PEs, clear the freeze through it */
	if (phb->unfreeze_pe) {
		ret = phb->unfreeze_pe(phb,
				       pe_no,
				       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
	} else {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
					       pe_no,
					       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		if (rc) {
			pr_warn("%s: Failure %ld clearing frozen "
				"PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				pe_no);
			ret = -EIO;
		}
	}

	/*
	 * For now, let's only display the diag buffer when we fail to clear
	 * the EEH status. We'll do more sensible things later when we have
	 * proper EEH support. We need to make sure we don't pollute ourselves
	 * with the normal errors generated when probing empty slots.
	 */
	if (has_diag && ret)
		pnv_pci_dump_phb_diag_data(phb->hose, phb->diag_data);

	spin_unlock_irqrestore(&phb->lock, flags);
}

static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u8 fstate;
	__be16 pcierr;
	unsigned int pe_no;
	s64 rc;

	/*
	 * Get the PE#. During the PCI probe stage, we might not have set
	 * it up yet, so map all ER errors to the reserved PE.
	 */
	pe_no = pdn->pe_number;
	if (pe_no == IODA_INVALID_PE) {
		pe_no = phb->ioda.reserved_pe_idx;
	}

	/*
	 * Fetch the frozen state. If the PHB supports compound PEs,
	 * we need to handle that case.
	 */
	if (phb->get_pe_state) {
		fstate = phb->get_pe_state(phb, pe_no);
	} else {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						pe_no,
						&fstate,
						&pcierr,
						NULL);
		if (rc) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
				__func__, rc, phb->hose->global_number, pe_no);
			return;
		}
	}

	pr_devel(" -> EEH check, bdfn=%04x PE#%x fstate=%x\n",
		 (pdn->busno << 8) | (pdn->devfn), pe_no, fstate);

	/* Clear the frozen state if applicable */
	if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_DMA_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_MMIO_DMA_FREEZE) {
		/*
		 * If the PHB supports compound PEs, freeze it
		 * for consistency.
		 */
		if (phb->freeze_pe)
			phb->freeze_pe(phb, pe_no);

		pnv_pci_handle_eeh_config(phb, pe_no);
	}
}
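
/*
 * Raw PCI config space accessors: route reads and writes of 1, 2 or 4 bytes
 * to the matching OPAL config-space call for the device's PHB.
 */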
int pnv_pci_cfg_read(struct pci_dn *pdn,
		     int where, int size, u32 *val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;
	s64 rc;

	switch (size) {
	case 1: {
		u8 v8;

		rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8);
		*val = (rc == OPAL_SUCCESS) ? v8 : 0xff;
		break;
	}
	case 2: {
		__be16 v16;

		rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
						    &v16);
		*val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff;
		break;
	}
	case 4: {
		__be32 v32;

		rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
		*val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff;
		break;
	}
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		 __func__, pdn->busno, pdn->devfn, where, size, *val);
	return PCIBIOS_SUCCESSFUL;
}

int pnv_pci_cfg_write(struct pci_dn *pdn,
		      int where, int size, u32 val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;

	pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		 __func__, pdn->busno, pdn->devfn, where, size, val);

	switch (size) {
	case 1:
		opal_pci_config_write_byte(phb->opal_id, bdfn, where, val);
		break;
	case 2:
		opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val);
		break;
	case 4:
		opal_pci_config_write_word(phb->opal_id, bdfn, where, val);
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return PCIBIOS_SUCCESSFUL;
}

#ifdef CONFIG_EEH
static bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
	struct eeh_dev *edev = NULL;
	struct pnv_phb *phb = pdn->phb->private_data;

	/* EEH not enabled ? */
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		return true;

	/* PE reset or device removed ? */
	edev = pdn->edev;
	if (edev) {
		if (edev->pe &&
		    (edev->pe->state & EEH_PE_CFG_BLOCKED))
			return false;

		if (edev->mode & EEH_DEV_REMOVED)
			return false;
	}

	return true;
}
#else
static inline bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
	return true;
}
#endif /* CONFIG_EEH */

static int pnv_pci_read_config(struct pci_bus *bus,
			       unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	int ret;

	*val = 0xFFFFFFFF;
	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_read(pdn, where, size, val);
	phb = pdn->phb->private_data;
	if (phb->flags & PNV_PHB_FLAG_EEH && pdn->edev) {
		if (*val == EEH_IO_ERROR_VALUE(size) &&
		    eeh_dev_check_failure(pdn->edev))
			return PCIBIOS_DEVICE_NOT_FOUND;
	} else {
		pnv_pci_config_check_eeh(pdn);
	}

	return ret;
}

static int pnv_pci_write_config(struct pci_bus *bus,
				unsigned int devfn,
				int where, int size, u32 val)
{
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	int ret;

	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_write(pdn, where, size, val);
	phb = pdn->phb->private_data;
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		pnv_pci_config_check_eeh(pdn);

	return ret;
}

struct pci_ops pnv_pci_ops = {
	.read  = pnv_pci_read_config,
	.write = pnv_pci_write_config,
};
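
/*
 * Translate a TCE index into a pointer to its entry, walking down the
 * indirect levels of a multi-level TCE table when present.
 */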
static __be64 *pnv_tce(struct iommu_table *tbl, long idx)
{
	__be64 *tmp = ((__be64 *)tbl->it_base);
	int level = tbl->it_indirect_levels;
	const long shift = ilog2(tbl->it_level_size);
	unsigned long mask = (tbl->it_level_size - 1) << (level * shift);

	while (level) {
		int n = (idx & mask) >> (level * shift);
		unsigned long tce = be64_to_cpu(tmp[n]);

		tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE));
		idx &= ~mask;
		mask >>= shift;
		--level;
	}

	return tmp + idx;
}

int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
		  unsigned long uaddr, enum dma_data_direction direction,
		  unsigned long attrs)
{
	u64 proto_tce = iommu_direction_to_tce_perm(direction);
	u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
	long i;

	if (proto_tce & TCE_PCI_WRITE)
		proto_tce |= TCE_PCI_READ;

	for (i = 0; i < npages; i++) {
		unsigned long newtce = proto_tce |
			((rpn + i) << tbl->it_page_shift);
		unsigned long idx = index - tbl->it_offset + i;

		*(pnv_tce(tbl, idx)) = cpu_to_be64(newtce);
	}

	return 0;
}

#ifdef CONFIG_IOMMU_API
int pnv_tce_xchg(struct iommu_table *tbl, long index,
		 unsigned long *hpa, enum dma_data_direction *direction)
{
	u64 proto_tce = iommu_direction_to_tce_perm(*direction);
	unsigned long newtce = *hpa | proto_tce, oldtce;
	unsigned long idx = index - tbl->it_offset;

	BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));

	if (newtce & TCE_PCI_WRITE)
		newtce |= TCE_PCI_READ;

	oldtce = be64_to_cpu(xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce)));
	*hpa = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	*direction = iommu_tce_direction(oldtce);

	return 0;
}
#endif

void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
{
	long i;

	for (i = 0; i < npages; i++) {
		unsigned long idx = index - tbl->it_offset + i;

		*(pnv_tce(tbl, idx)) = cpu_to_be64(0);
	}
}

unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
{
	return be64_to_cpu(*(pnv_tce(tbl, index - tbl->it_offset)));
}

struct iommu_table *pnv_pci_table_alloc(int nid)
{
	struct iommu_table *tbl;

	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid);
	if (!tbl)
		return NULL;

	INIT_LIST_HEAD_RCU(&tbl->it_group_list);
	kref_init(&tbl->it_kref);

	return tbl;
}

long pnv_pci_link_table_and_group(int node, int num,
		struct iommu_table *tbl,
		struct iommu_table_group *table_group)
{
	struct iommu_table_group_link *tgl = NULL;

	if (WARN_ON(!tbl || !table_group))
		return -EINVAL;

	tgl = kzalloc_node(sizeof(struct iommu_table_group_link), GFP_KERNEL,
			   node);
	if (!tgl)
		return -ENOMEM;

	tgl->table_group = table_group;
	list_add_rcu(&tgl->next, &tbl->it_group_list);

	table_group->tables[num] = tbl;

	return 0;
}

static void pnv_iommu_table_group_link_free(struct rcu_head *head)
{
	struct iommu_table_group_link *tgl = container_of(head,
			struct iommu_table_group_link, rcu);

	kfree(tgl);
}

void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
		struct iommu_table_group *table_group)
{
	long i;
	bool found;
	struct iommu_table_group_link *tgl;

	if (!tbl || !table_group)
		return;

	/* Remove link to a group from table's list of attached groups */
	found = false;
	list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
		if (tgl->table_group == table_group) {
			list_del_rcu(&tgl->next);
			call_rcu(&tgl->rcu, pnv_iommu_table_group_link_free);
			found = true;
			break;
		}
	}
	if (WARN_ON(!found))
		return;

	/* Clean a pointer to iommu_table in iommu_table_group::tables[] */
	found = false;
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		if (table_group->tables[i] == tbl) {
			table_group->tables[i] = NULL;
			found = true;
			break;
		}
	}
	WARN_ON(!found);
}

void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
			       void *tce_mem, u64 tce_size,
			       u64 dma_offset, unsigned page_shift)
{
	tbl->it_blocksize = 16;
	tbl->it_base = (unsigned long)tce_mem;
	tbl->it_page_shift = page_shift;
	tbl->it_offset = dma_offset >> tbl->it_page_shift;
	tbl->it_index = 0;
	tbl->it_size = tce_size >> 3;
	tbl->it_busno = 0;
	tbl->it_type = TCE_PCI;
}
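
/*
 * Per-device DMA setup: for VFs, fix up the pdn's PE number by matching the
 * device's RID against the PHB's PE list, then call the PHB's dma_dev_setup
 * hook if it has one.
 */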
void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
#ifdef CONFIG_PCI_IOV
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;

	/* Fix the VF pdn PE number */
	if (pdev->is_virtfn) {
		pdn = pci_get_pdn(pdev);
		WARN_ON(pdn->pe_number != IODA_INVALID_PE);
		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
			if (pe->rid == ((pdev->bus->number << 8) |
			    (pdev->devfn & 0xff))) {
				pdn->pe_number = pe->pe_number;
				pe->pdev = pdev;
				break;
			}
		}
	}
#endif /* CONFIG_PCI_IOV */

	if (phb && phb->dma_dev_setup)
		phb->dma_dev_setup(phb, pdev);
}

void pnv_pci_dma_bus_setup(struct pci_bus *bus)
{
	struct pci_controller *hose = bus->sysdata;
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe;

	list_for_each_entry(pe, &phb->ioda.pe_list, list) {
		if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
			continue;

		if (!pe->pbus)
			continue;

		if (bus->number == ((pe->rid >> 8) & 0xFF)) {
			pe->pbus = bus;
			break;
		}
	}
}
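
/*
 * Enable or disable peer-to-peer DMA between an initiator and a target
 * device through OPAL, with reference counting so the initiator's TVE#1
 * bypass setting and the target PHB configuration are only restored on
 * the last disable.
 */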
int pnv_pci_set_p2p(struct pci_dev *initiator, struct pci_dev *target, u64 desc)
{
	struct pci_controller *hose;
	struct pnv_phb *phb_init, *phb_target;
	struct pnv_ioda_pe *pe_init;
	int rc;

	if (!opal_check_token(OPAL_PCI_SET_P2P))
		return -ENXIO;

	hose = pci_bus_to_host(initiator->bus);
	phb_init = hose->private_data;

	hose = pci_bus_to_host(target->bus);
	phb_target = hose->private_data;

	pe_init = pnv_ioda_get_pe(initiator);
	if (!pe_init)
		return -ENODEV;

	/*
	 * Configuring the initiator's PHB requires adjusting its TVE#1
	 * setting. Since the same device can be an initiator several times
	 * for different target devices, we need to keep a reference count
	 * to know when we can restore the default bypass setting on its
	 * TVE#1 when disabling. OPAL is not tracking PE states, so we add
	 * a reference count on the PE in Linux.
	 *
	 * For the target, the configuration is per PHB, so we keep a
	 * target reference count on the PHB.
	 */
	mutex_lock(&p2p_mutex);

	if (desc & OPAL_PCI_P2P_ENABLE) {
		/* always go to opal to validate the configuration */
		rc = opal_pci_set_p2p(phb_init->opal_id, phb_target->opal_id,
				      desc, pe_init->pe_number);
		if (rc != OPAL_SUCCESS) {
			rc = -EIO;
			goto out;
		}

		pe_init->p2p_initiator_count++;
		phb_target->p2p_target_count++;
	} else {
		if (!pe_init->p2p_initiator_count ||
		    !phb_target->p2p_target_count) {
			rc = -EINVAL;
			goto out;
		}

		if (--pe_init->p2p_initiator_count == 0)
			pnv_pci_ioda2_set_bypass(pe_init, true);

		if (--phb_target->p2p_target_count == 0) {
			rc = opal_pci_set_p2p(phb_init->opal_id,
					      phb_target->opal_id, desc,
					      pe_init->pe_number);
			if (rc != OPAL_SUCCESS) {
				rc = -EIO;
				goto out;
			}
		}
	}
	rc = 0;
out:
	mutex_unlock(&p2p_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_p2p);

struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);

	return of_node_get(hose->dn);
}
EXPORT_SYMBOL(pnv_pci_get_phb_node);

int pnv_pci_enable_tunnel(struct pci_dev *dev, u64 *asnind)
{
	struct device_node *np;
	const __be32 *prop;
	struct pnv_ioda_pe *pe;
	uint16_t window_id;
	int rc;

	if (!radix_enabled())
		return -ENXIO;

	if (!(np = pnv_pci_get_phb_node(dev)))
		return -ENXIO;

	prop = of_get_property(np, "ibm,phb-indications", NULL);
	of_node_put(np);

	if (!prop || !prop[1])
		return -ENXIO;

	*asnind = (u64)be32_to_cpu(prop[1]);
	pe = pnv_ioda_get_pe(dev);
	if (!pe)
		return -ENODEV;

	/* Increase real window size to accept as_notify messages. */
	window_id = (pe->pe_number << 1) + 1;
	rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, pe->pe_number,
					     window_id, pe->tce_bypass_base,
					     (uint64_t)1 << 48);
	return opal_error_code(rc);
}
EXPORT_SYMBOL_GPL(pnv_pci_enable_tunnel);

int pnv_pci_disable_tunnel(struct pci_dev *dev)
{
	struct pnv_ioda_pe *pe;

	pe = pnv_ioda_get_pe(dev);
	if (!pe)
		return -ENODEV;

	/* Restore default real window size. */
	pnv_pci_ioda2_set_bypass(pe, true);
	return 0;
}
EXPORT_SYMBOL_GPL(pnv_pci_disable_tunnel);
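
/*
 * Claim or release the PBCQ tunnel BAR for a device. Only one address can
 * be set per PHB; enabling checks that the BAR is free (or already set to
 * the same address), disabling requires passing the address that was set.
 */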
int pnv_pci_set_tunnel_bar(struct pci_dev *dev, u64 addr, int enable)
{
	__be64 val;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	u64 tunnel_bar;
	int rc;

	if (!opal_check_token(OPAL_PCI_GET_PBCQ_TUNNEL_BAR))
		return -ENXIO;
	if (!opal_check_token(OPAL_PCI_SET_PBCQ_TUNNEL_BAR))
		return -ENXIO;

	hose = pci_bus_to_host(dev->bus);
	phb = hose->private_data;

	mutex_lock(&tunnel_mutex);
	rc = opal_pci_get_pbcq_tunnel_bar(phb->opal_id, &val);
	if (rc != OPAL_SUCCESS) {
		rc = -EIO;
		goto out;
	}
	tunnel_bar = be64_to_cpu(val);
	if (enable) {
		/*
		 * Only one device per PHB can use atomics.
		 * Our policy is first-come, first-served.
		 */
		if (tunnel_bar) {
			if (tunnel_bar != addr)
				rc = -EBUSY;
			else
				rc = 0;	/* Setting same address twice is ok */
			goto out;
		}
	} else {
		/*
		 * The device that owns atomics and wants to release
		 * them must pass the same address with enable == 0.
		 */
		if (tunnel_bar != addr) {
			rc = -EPERM;
			goto out;
		}
		addr = 0x0ULL;
	}
	rc = opal_pci_set_pbcq_tunnel_bar(phb->opal_id, addr);
	rc = opal_error_code(rc);
out:
	mutex_unlock(&tunnel_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_tunnel_bar);

#ifdef CONFIG_PPC64	/* for thread.tidr */
int pnv_pci_get_as_notify_info(struct task_struct *task, u32 *lpid, u32 *pid,
			       u32 *tid)
{
	struct mm_struct *mm = NULL;

	if (task == NULL)
		return -EINVAL;

	mm = get_task_mm(task);
	if (mm == NULL)
		return -EINVAL;

	*pid = mm->context.id;
	mmput(mm);

	*tid = task->thread.tidr;
	*lpid = mfspr(SPRN_LPID);
	return 0;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_as_notify_info);
#endif

void pnv_pci_shutdown(void)
{
	struct pci_controller *hose;

	list_for_each_entry(hose, &hose_list, list_node)
		if (hose->controller_ops.shutdown)
			hose->controller_ops.shutdown(hose);
}

/* Fixup wrong class code in p7ioc and p8 root complex */
static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);

void __init pnv_pci_init(void)
{
	struct device_node *np;

	pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);

	/* If we don't have OPAL, eg. in sim, just skip PCI probe */
	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return;

	/* Look for IODA IO-Hubs. */
	for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
		pnv_pci_init_ioda_hub(np);
	}

	/* Look for ioda2 built-in PHB3's */
	for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Look for ioda3 built-in PHB4's, we treat them as IODA2 */
	for_each_compatible_node(np, NULL, "ibm,ioda3-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Look for NPU PHBs */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu-phb")
		pnv_pci_init_npu_phb(np);

	/*
	 * Look for NPU2 PHBs which we treat mostly as NPU PHBs with
	 * the exception of TCE kill which requires an OPAL call.
	 */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-phb")
		pnv_pci_init_npu_phb(np);

	/* Look for NPU2 OpenCAPI PHBs */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-opencapi-phb")
		pnv_pci_init_npu2_opencapi_phb(np);

	/* Configure IOMMU DMA hooks */
	set_pci_dma_ops(&dma_iommu_ops);
}

machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init);