pci.c

/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/iommu.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/firmware.h>
#include <asm/eeh_event.h>
#include <asm/eeh.h>

#include "powernv.h"
#include "pci.h"

/* Delay in usec */
#define PCI_RESET_DELAY_US	3000000

#define cfg_dbg(fmt...)	do { } while (0)
//#define cfg_dbg(fmt...)	printk(fmt)

#ifdef CONFIG_PCI_MSI
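
/*
 * Allocate a hardware MSI from the PHB's bitmap for each MSI descriptor
 * of the device, map it to a Linux virq, and have the PHB-specific
 * msi_setup() hook compose the message. Anything already allocated for
 * the current entry is released again on failure.
 */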
int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	struct msi_msg msg;
	int hwirq;
	unsigned int virq;
	int rc;

	if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
		return -ENODEV;

	if (pdev->no_64bit_msi && !phb->msi32_support)
		return -ENODEV;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
			pr_warn("%s: Supports only 64-bit MSIs\n",
				pci_name(pdev));
			return -ENXIO;
		}
		hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1);
		if (hwirq < 0) {
			pr_warn("%s: Failed to find a free MSI\n",
				pci_name(pdev));
			return -ENOSPC;
		}
		virq = irq_create_mapping(NULL, phb->msi_base + hwirq);
		if (virq == NO_IRQ) {
			pr_warn("%s: Failed to map MSI to linux irq\n",
				pci_name(pdev));
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return -ENOMEM;
		}
		rc = phb->msi_setup(phb, pdev, phb->msi_base + hwirq,
				    virq, entry->msi_attrib.is_64, &msg);
		if (rc) {
			pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
			irq_dispose_mapping(virq);
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return rc;
		}
		irq_set_msi_desc(virq, entry);
		pci_write_msi_msg(virq, &msg);
	}
	return 0;
}
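
/* Undo pnv_setup_msi_irqs(): unmap each virq and return its hwirq to the bitmap. */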
void pnv_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	irq_hw_number_t hwirq;

	if (WARN_ON(!phb))
		return;

	for_each_pci_msi_entry(entry, pdev) {
		if (entry->irq == NO_IRQ)
			continue;
		hwirq = virq_to_hw(entry->irq);
		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
		msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
	}
}
#endif /* CONFIG_PCI_MSI */

static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
					 struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoP7IOCPhbErrorData *data;
	int i;

	data = (struct OpalIoP7IOCPhbErrorData *)common;
	pr_info("P7IOC PHB#%d Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));

	if (data->brdgCtl)
		pr_info("brdgCtl:     %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts:      %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts:     %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts:  %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog:  %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->p7iocPlssr || data->p7iocCsr)
		pr_info("PhbSts:      %016llx %016llx\n",
			be64_to_cpu(data->p7iocPlssr),
			be64_to_cpu(data->p7iocCsr));
	if (data->lemFir)
		pr_info("Lem:         %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	/*
	 * pestA/B are big-endian; convert before testing the valid
	 * bit, as the PHB3 variant below already does.
	 */
	for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
		if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
		    (be64_to_cpu(data->pestB[i]) >> 63) == 0)
			continue;

		pr_info("PE[%3d] A/B: %016llx %016llx\n",
			i, be64_to_cpu(data->pestA[i]),
			be64_to_cpu(data->pestB[i]));
	}
}

static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
					struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoPhb3ErrorData *data;
	int i;

	data = (struct OpalIoPhb3ErrorData *)common;
	pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));

	if (data->brdgCtl)
		pr_info("brdgCtl:     %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts:      %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts:     %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts:  %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog:  %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->nFir)
		pr_info("nFir:        %016llx %016llx %016llx\n",
			be64_to_cpu(data->nFir),
			be64_to_cpu(data->nFirMask),
			be64_to_cpu(data->nFirWOF));
	if (data->phbPlssr || data->phbCsr)
		pr_info("PhbSts:      %016llx %016llx\n",
			be64_to_cpu(data->phbPlssr),
			be64_to_cpu(data->phbCsr));
	if (data->lemFir)
		pr_info("Lem:         %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
		if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
		    (be64_to_cpu(data->pestB[i]) >> 63) == 0)
			continue;

		pr_info("PE[%3d] A/B: %016llx %016llx\n",
			i, be64_to_cpu(data->pestA[i]),
			be64_to_cpu(data->pestB[i]));
	}
}
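
/*
 * Common entry point for dumping diag-data: decode the ioType field of
 * the shared header and hand the blob to the matching per-chip dump
 * routine.
 */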
void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
				unsigned char *log_buff)
{
	struct OpalIoPhbErrorCommon *common;

	if (!hose || !log_buff)
		return;

	common = (struct OpalIoPhbErrorCommon *)log_buff;
	switch (be32_to_cpu(common->ioType)) {
	case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
		pnv_pci_dump_p7ioc_diag_data(hose, common);
		break;
	case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
		pnv_pci_dump_phb3_diag_data(hose, common);
		break;
	default:
		pr_warn("%s: Unrecognized ioType %d\n",
			__func__, be32_to_cpu(common->ioType));
	}
}
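
/*
 * A config access found the PE frozen: fetch the PHB diag-data blob,
 * clear the freeze (via the compound-PE hook when the PHB provides
 * one), and dump the diag-data only when the clear fails.
 */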
static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
{
	unsigned long flags, rc;
	int has_diag, ret = 0;

	spin_lock_irqsave(&phb->lock, flags);

	/* Fetch PHB diag-data */
	rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
					 PNV_PCI_DIAG_BUF_SIZE);
	has_diag = (rc == OPAL_SUCCESS);

	/* If the PHB supports compound PE, let its hook clear the freeze */
	if (phb->unfreeze_pe) {
		ret = phb->unfreeze_pe(phb,
				       pe_no,
				       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
	} else {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
					       pe_no,
					       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		if (rc) {
			pr_warn("%s: Failure %ld clearing frozen PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				pe_no);
			ret = -EIO;
		}
	}

	/*
	 * For now, let's only display the diag buffer when we fail to clear
	 * the EEH status. We'll do more sensible things later when we have
	 * proper EEH support. We need to make sure we don't pollute ourselves
	 * with the normal errors generated when probing empty slots.
	 */
	if (has_diag && ret)
		pnv_pci_dump_phb_diag_data(phb->hose, phb->diag.blob);

	spin_unlock_irqrestore(&phb->lock, flags);
}
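
/*
 * Probe the frozen state of the PE owning @pdn after a config access,
 * falling back to the reserved PE while probing has not assigned one
 * yet. On a detected freeze, the PE is (re-)frozen for consistency and
 * handed to pnv_pci_handle_eeh_config().
 */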
static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u8 fstate;
	__be16 pcierr;
	int pe_no;
	s64 rc;

	/*
	 * Get the PE#. During the PCI probe stage, we might not have
	 * set that up yet, so map all ER errors to the reserved PE.
	 */
	pe_no = pdn->pe_number;
	if (pe_no == IODA_INVALID_PE) {
		if (phb->type == PNV_PHB_P5IOC2)
			pe_no = 0;
		else
			pe_no = phb->ioda.reserved_pe;
	}

	/*
	 * Fetch the frozen state. If the PHB supports compound PE,
	 * we need to handle that case as well.
	 */
	if (phb->get_pe_state) {
		fstate = phb->get_pe_state(phb, pe_no);
	} else {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						pe_no,
						&fstate,
						&pcierr,
						NULL);
		if (rc) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
				__func__, rc, phb->hose->global_number, pe_no);
			return;
		}
	}

	cfg_dbg(" -> EEH check, bdfn=%04x PE#%d fstate=%x\n",
		(pdn->busno << 8) | (pdn->devfn), pe_no, fstate);

	/* Clear the frozen state if applicable */
	if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_DMA_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_MMIO_DMA_FREEZE) {
		/*
		 * If the PHB supports compound PE, freeze it
		 * for consistency.
		 */
		if (phb->freeze_pe)
			phb->freeze_pe(phb, pe_no);

		pnv_pci_handle_eeh_config(phb, pe_no);
	}
}
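
/*
 * Raw config-space accessors backed by OPAL calls. A failed read
 * returns the all-ones pattern for the access size, the same value a
 * failed PCI config read would yield.
 */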
int pnv_pci_cfg_read(struct pci_dn *pdn,
		     int where, int size, u32 *val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;
	s64 rc;

	switch (size) {
	case 1: {
		u8 v8;

		rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8);
		*val = (rc == OPAL_SUCCESS) ? v8 : 0xff;
		break;
	}
	case 2: {
		__be16 v16;

		rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
						    &v16);
		*val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff;
		break;
	}
	case 4: {
		__be32 v32;

		rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
		*val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff;
		break;
	}
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	cfg_dbg("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		__func__, pdn->busno, pdn->devfn, where, size, *val);
	return PCIBIOS_SUCCESSFUL;
}

int pnv_pci_cfg_write(struct pci_dn *pdn,
		      int where, int size, u32 val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;

	cfg_dbg("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		__func__, pdn->busno, pdn->devfn, where, size, val);
	switch (size) {
	case 1:
		opal_pci_config_write_byte(phb->opal_id, bdfn, where, val);
		break;
	case 2:
		opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val);
		break;
	case 4:
		opal_pci_config_write_word(phb->opal_id, bdfn, where, val);
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return PCIBIOS_SUCCESSFUL;
}

#ifdef CONFIG_EEH
static bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
	struct eeh_dev *edev = NULL;
	struct pnv_phb *phb = pdn->phb->private_data;

	/* EEH not enabled ? */
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		return true;

	/* PE reset or device removed ? */
	edev = pdn->edev;
	if (edev) {
		if (edev->pe &&
		    (edev->pe->state & EEH_PE_CFG_BLOCKED))
			return false;

		if (edev->mode & EEH_DEV_REMOVED)
			return false;
	}

	return true;
}
#else
static inline bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
	return true;
}
#endif /* CONFIG_EEH */
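
/*
 * pci_ops wrappers: refuse the access while EEH has config space
 * blocked or the device was removed, then look for a new frozen state
 * afterwards. With full EEH enabled, a read returning the EEH error
 * value triggers eeh_dev_check_failure() instead of the manual check.
 */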
static int pnv_pci_read_config(struct pci_bus *bus,
			       unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	int ret;

	*val = 0xFFFFFFFF;
	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_read(pdn, where, size, val);
	phb = pdn->phb->private_data;
	if (phb->flags & PNV_PHB_FLAG_EEH && pdn->edev) {
		if (*val == EEH_IO_ERROR_VALUE(size) &&
		    eeh_dev_check_failure(pdn->edev))
			return PCIBIOS_DEVICE_NOT_FOUND;
	} else {
		pnv_pci_config_check_eeh(pdn);
	}

	return ret;
}

static int pnv_pci_write_config(struct pci_bus *bus,
				unsigned int devfn,
				int where, int size, u32 val)
{
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	int ret;

	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_write(pdn, where, size, val);
	phb = pdn->phb->private_data;
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		pnv_pci_config_check_eeh(pdn);

	return ret;
}

struct pci_ops pnv_pci_ops = {
	.read  = pnv_pci_read_config,
	.write = pnv_pci_write_config,
};
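
/*
 * Resolve the TCE entry for @idx, walking it_indirect_levels levels of
 * indirection. Each intermediate entry holds the address of the next
 * level table with the TCE_PCI_READ/WRITE permission bits masked off.
 */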
static __be64 *pnv_tce(struct iommu_table *tbl, long idx)
{
	__be64 *tmp = ((__be64 *)tbl->it_base);
	int level = tbl->it_indirect_levels;
	const long shift = ilog2(tbl->it_level_size);
	unsigned long mask = (tbl->it_level_size - 1) << (level * shift);

	while (level) {
		int n = (idx & mask) >> (level * shift);
		unsigned long tce = be64_to_cpu(tmp[n]);

		tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE));
		idx &= ~mask;
		mask >>= shift;
		--level;
	}

	return tmp + idx;
}
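
/*
 * Fill @npages TCE entries starting at @index: each TCE is the real
 * page number of @uaddr plus the permission bits derived from the DMA
 * direction, stored big-endian as the hardware expects.
 */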
int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
		  unsigned long uaddr, enum dma_data_direction direction,
		  struct dma_attrs *attrs)
{
	u64 proto_tce = iommu_direction_to_tce_perm(direction);
	u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
	long i;

	for (i = 0; i < npages; i++) {
		unsigned long newtce = proto_tce |
			((rpn + i) << tbl->it_page_shift);
		unsigned long idx = index - tbl->it_offset + i;

		*(pnv_tce(tbl, idx)) = cpu_to_be64(newtce);
	}

	return 0;
}

#ifdef CONFIG_IOMMU_API
int pnv_tce_xchg(struct iommu_table *tbl, long index,
		 unsigned long *hpa, enum dma_data_direction *direction)
{
	u64 proto_tce = iommu_direction_to_tce_perm(*direction);
	unsigned long newtce = *hpa | proto_tce, oldtce;
	unsigned long idx = index - tbl->it_offset;

	BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));

	oldtce = xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce));
	*hpa = be64_to_cpu(oldtce) & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	*direction = iommu_tce_direction(oldtce);

	return 0;
}
#endif

void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
{
	long i;

	for (i = 0; i < npages; i++) {
		unsigned long idx = index - tbl->it_offset + i;

		*(pnv_tce(tbl, idx)) = cpu_to_be64(0);
	}
}

unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
{
	return *(pnv_tce(tbl, index - tbl->it_offset));
}

struct iommu_table *pnv_pci_table_alloc(int nid)
{
	struct iommu_table *tbl;

	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid);
	INIT_LIST_HEAD_RCU(&tbl->it_group_list);

	return tbl;
}

long pnv_pci_link_table_and_group(int node, int num,
				  struct iommu_table *tbl,
				  struct iommu_table_group *table_group)
{
	struct iommu_table_group_link *tgl = NULL;

	if (WARN_ON(!tbl || !table_group))
		return -EINVAL;

	tgl = kzalloc_node(sizeof(struct iommu_table_group_link), GFP_KERNEL,
			   node);
	if (!tgl)
		return -ENOMEM;

	tgl->table_group = table_group;
	list_add_rcu(&tgl->next, &tbl->it_group_list);

	table_group->tables[num] = tbl;

	return 0;
}

static void pnv_iommu_table_group_link_free(struct rcu_head *head)
{
	struct iommu_table_group_link *tgl = container_of(head,
			struct iommu_table_group_link, rcu);

	kfree(tgl);
}

void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
				    struct iommu_table_group *table_group)
{
	long i;
	bool found;
	struct iommu_table_group_link *tgl;

	if (!tbl || !table_group)
		return;

	/* Remove link to a group from table's list of attached groups */
	found = false;
	list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
		if (tgl->table_group == table_group) {
			list_del_rcu(&tgl->next);
			call_rcu(&tgl->rcu, pnv_iommu_table_group_link_free);
			found = true;
			break;
		}
	}
	if (WARN_ON(!found))
		return;

	/* Clean a pointer to iommu_table in iommu_table_group::tables[] */
	found = false;
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		if (table_group->tables[i] == tbl) {
			table_group->tables[i] = NULL;
			found = true;
			break;
		}
	}
	WARN_ON(!found);
}
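
/*
 * Populate the generic iommu_table fields from a raw TCE block:
 * it_size counts 8-byte TCE entries (hence tce_size >> 3) and
 * it_offset is the DMA offset expressed in IOMMU pages.
 */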
void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
			       void *tce_mem, u64 tce_size,
			       u64 dma_offset, unsigned page_shift)
{
	tbl->it_blocksize = 16;
	tbl->it_base = (unsigned long)tce_mem;
	tbl->it_page_shift = page_shift;
	tbl->it_offset = dma_offset >> tbl->it_page_shift;
	tbl->it_index = 0;
	tbl->it_size = tce_size >> 3;
	tbl->it_busno = 0;
	tbl->it_type = TCE_PCI;
}

void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
#ifdef CONFIG_PCI_IOV
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;

	/* Fix the VF pdn PE number */
	if (pdev->is_virtfn) {
		pdn = pci_get_pdn(pdev);
		WARN_ON(pdn->pe_number != IODA_INVALID_PE);
		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
			if (pe->rid == ((pdev->bus->number << 8) |
			    (pdev->devfn & 0xff))) {
				pdn->pe_number = pe->pe_number;
				pe->pdev = pdev;
				break;
			}
		}
	}
#endif /* CONFIG_PCI_IOV */

	if (phb && phb->dma_dev_setup)
		phb->dma_dev_setup(phb, pdev);
}

void pnv_pci_shutdown(void)
{
	struct pci_controller *hose;

	list_for_each_entry(hose, &hose_list, list_node)
		if (hose->controller_ops.shutdown)
			hose->controller_ops.shutdown(hose);
}

/* Fixup wrong class code in p7ioc and p8 root complex */
static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);

void __init pnv_pci_init(void)
{
	struct device_node *np;
	bool found_ioda = false;

	pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);

	/* If we don't have OPAL, e.g. in a simulator, just skip PCI probe */
	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return;

	/*
	 * Look for IODA IO-Hubs. We don't support mixing IODA
	 * and p5ioc2 due to the need to change some global
	 * probing flags.
	 */
	for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
		pnv_pci_init_ioda_hub(np);
		found_ioda = true;
	}

	/* Look for p5ioc2 IO-Hubs */
	if (!found_ioda)
		for_each_compatible_node(np, NULL, "ibm,p5ioc2")
			pnv_pci_init_p5ioc2_hub(np);

	/* Look for ioda2 built-in PHB3's */
	for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Look for NPU PHBs */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu-phb")
		pnv_pci_init_npu_phb(np);

	/* Setup the linkage between OF nodes and PHBs */
	pci_devs_phb_init();

	/* Configure IOMMU DMA hooks */
	set_pci_dma_ops(&dma_iommu_ops);
}

machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init);