/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Currently supports only P5IOC2
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/iommu.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/firmware.h>
#include <asm/eeh_event.h>
#include <asm/eeh.h>

#include "powernv.h"
#include "pci.h"

/* Delay in usec */
#define PCI_RESET_DELAY_US	3000000

#define cfg_dbg(fmt...)	do { } while(0)
//#define cfg_dbg(fmt...)	printk(fmt)
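
/*
 * MSI handling: for each MSI descriptor on the device, allocate a
 * hardware IRQ from the PHB's MSI bitmap, map it to a Linux virtual
 * IRQ, then let the PHB-specific msi_setup() hook compose the MSI
 * message. Any failure releases the bitmap entry and IRQ mapping for
 * that descriptor and aborts the whole request.
 */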
#ifdef CONFIG_PCI_MSI
static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
        struct pci_controller *hose = pci_bus_to_host(pdev->bus);
        struct pnv_phb *phb = hose->private_data;
        struct msi_desc *entry;
        struct msi_msg msg;
        int hwirq;
        unsigned int virq;
        int rc;

        if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
                return -ENODEV;

        if (pdev->no_64bit_msi && !phb->msi32_support)
                return -ENODEV;

        list_for_each_entry(entry, &pdev->msi_list, list) {
                if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
                        pr_warn("%s: Supports only 64-bit MSIs\n",
                                pci_name(pdev));
                        return -ENXIO;
                }
                hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1);
                if (hwirq < 0) {
                        pr_warn("%s: Failed to find a free MSI\n",
                                pci_name(pdev));
                        return -ENOSPC;
                }
                virq = irq_create_mapping(NULL, phb->msi_base + hwirq);
                if (virq == NO_IRQ) {
                        pr_warn("%s: Failed to map MSI to linux irq\n",
                                pci_name(pdev));
                        msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
                        return -ENOMEM;
                }
                rc = phb->msi_setup(phb, pdev, phb->msi_base + hwirq,
                                    virq, entry->msi_attrib.is_64, &msg);
                if (rc) {
                        pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
                        irq_dispose_mapping(virq);
                        msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
                        return rc;
                }
                irq_set_msi_desc(virq, entry);
                pci_write_msi_msg(virq, &msg);
        }
        return 0;
}

static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
{
        struct pci_controller *hose = pci_bus_to_host(pdev->bus);
        struct pnv_phb *phb = hose->private_data;
        struct msi_desc *entry;

        if (WARN_ON(!phb))
                return;

        list_for_each_entry(entry, &pdev->msi_list, list) {
                if (entry->irq == NO_IRQ)
                        continue;
                irq_set_msi_desc(entry->irq, NULL);
                msi_bitmap_free_hwirqs(&phb->msi_bmp,
                        virq_to_hw(entry->irq) - phb->msi_base, 1);
                irq_dispose_mapping(entry->irq);
        }
}
#endif /* CONFIG_PCI_MSI */
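
/*
 * PHB diagnostic data comes back from OPAL as a big-endian blob whose
 * layout depends on the hub type (P7IOC vs. PHB3). The dumpers below
 * print only the registers that are non-zero, so a healthy PHB does
 * not flood the log.
 */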
static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
                                         struct OpalIoPhbErrorCommon *common)
{
        struct OpalIoP7IOCPhbErrorData *data;
        int i;

        data = (struct OpalIoP7IOCPhbErrorData *)common;
        pr_info("P7IOC PHB#%d Diag-data (Version: %d)\n",
                hose->global_number, be32_to_cpu(common->version));

        if (data->brdgCtl)
                pr_info("brdgCtl: %08x\n",
                        be32_to_cpu(data->brdgCtl));
        if (data->portStatusReg || data->rootCmplxStatus ||
            data->busAgentStatus)
                pr_info("UtlSts: %08x %08x %08x\n",
                        be32_to_cpu(data->portStatusReg),
                        be32_to_cpu(data->rootCmplxStatus),
                        be32_to_cpu(data->busAgentStatus));
        if (data->deviceStatus || data->slotStatus ||
            data->linkStatus || data->devCmdStatus ||
            data->devSecStatus)
                pr_info("RootSts: %08x %08x %08x %08x %08x\n",
                        be32_to_cpu(data->deviceStatus),
                        be32_to_cpu(data->slotStatus),
                        be32_to_cpu(data->linkStatus),
                        be32_to_cpu(data->devCmdStatus),
                        be32_to_cpu(data->devSecStatus));
        if (data->rootErrorStatus || data->uncorrErrorStatus ||
            data->corrErrorStatus)
                pr_info("RootErrSts: %08x %08x %08x\n",
                        be32_to_cpu(data->rootErrorStatus),
                        be32_to_cpu(data->uncorrErrorStatus),
                        be32_to_cpu(data->corrErrorStatus));
        if (data->tlpHdr1 || data->tlpHdr2 ||
            data->tlpHdr3 || data->tlpHdr4)
                pr_info("RootErrLog: %08x %08x %08x %08x\n",
                        be32_to_cpu(data->tlpHdr1),
                        be32_to_cpu(data->tlpHdr2),
                        be32_to_cpu(data->tlpHdr3),
                        be32_to_cpu(data->tlpHdr4));
        if (data->sourceId || data->errorClass ||
            data->correlator)
                pr_info("RootErrLog1: %08x %016llx %016llx\n",
                        be32_to_cpu(data->sourceId),
                        be64_to_cpu(data->errorClass),
                        be64_to_cpu(data->correlator));
        if (data->p7iocPlssr || data->p7iocCsr)
                pr_info("PhbSts: %016llx %016llx\n",
                        be64_to_cpu(data->p7iocPlssr),
                        be64_to_cpu(data->p7iocCsr));
        if (data->lemFir)
                pr_info("Lem: %016llx %016llx %016llx\n",
                        be64_to_cpu(data->lemFir),
                        be64_to_cpu(data->lemErrorMask),
                        be64_to_cpu(data->lemWOF));
        if (data->phbErrorStatus)
                pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
                        be64_to_cpu(data->phbErrorStatus),
                        be64_to_cpu(data->phbFirstErrorStatus),
                        be64_to_cpu(data->phbErrorLog0),
                        be64_to_cpu(data->phbErrorLog1));
        if (data->mmioErrorStatus)
                pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
                        be64_to_cpu(data->mmioErrorStatus),
                        be64_to_cpu(data->mmioFirstErrorStatus),
                        be64_to_cpu(data->mmioErrorLog0),
                        be64_to_cpu(data->mmioErrorLog1));
        if (data->dma0ErrorStatus)
                pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
                        be64_to_cpu(data->dma0ErrorStatus),
                        be64_to_cpu(data->dma0FirstErrorStatus),
                        be64_to_cpu(data->dma0ErrorLog0),
                        be64_to_cpu(data->dma0ErrorLog1));
        if (data->dma1ErrorStatus)
                pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
                        be64_to_cpu(data->dma1ErrorStatus),
                        be64_to_cpu(data->dma1FirstErrorStatus),
                        be64_to_cpu(data->dma1ErrorLog0),
                        be64_to_cpu(data->dma1ErrorLog1));

        for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
                /* The PEST entries are big-endian like the rest of the
                 * blob, so convert before testing the valid bit, as the
                 * PHB3 variant below already does.
                 */
                if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
                    (be64_to_cpu(data->pestB[i]) >> 63) == 0)
                        continue;

                pr_info("PE[%3d] A/B: %016llx %016llx\n",
                        i, be64_to_cpu(data->pestA[i]),
                        be64_to_cpu(data->pestB[i]));
        }
}

static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
                                        struct OpalIoPhbErrorCommon *common)
{
        struct OpalIoPhb3ErrorData *data;
        int i;

        data = (struct OpalIoPhb3ErrorData *)common;
        pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n",
                hose->global_number, be32_to_cpu(common->version));

        if (data->brdgCtl)
                pr_info("brdgCtl: %08x\n",
                        be32_to_cpu(data->brdgCtl));
        if (data->portStatusReg || data->rootCmplxStatus ||
            data->busAgentStatus)
                pr_info("UtlSts: %08x %08x %08x\n",
                        be32_to_cpu(data->portStatusReg),
                        be32_to_cpu(data->rootCmplxStatus),
                        be32_to_cpu(data->busAgentStatus));
        if (data->deviceStatus || data->slotStatus ||
            data->linkStatus || data->devCmdStatus ||
            data->devSecStatus)
                pr_info("RootSts: %08x %08x %08x %08x %08x\n",
                        be32_to_cpu(data->deviceStatus),
                        be32_to_cpu(data->slotStatus),
                        be32_to_cpu(data->linkStatus),
                        be32_to_cpu(data->devCmdStatus),
                        be32_to_cpu(data->devSecStatus));
        if (data->rootErrorStatus || data->uncorrErrorStatus ||
            data->corrErrorStatus)
                pr_info("RootErrSts: %08x %08x %08x\n",
                        be32_to_cpu(data->rootErrorStatus),
                        be32_to_cpu(data->uncorrErrorStatus),
                        be32_to_cpu(data->corrErrorStatus));
        if (data->tlpHdr1 || data->tlpHdr2 ||
            data->tlpHdr3 || data->tlpHdr4)
                pr_info("RootErrLog: %08x %08x %08x %08x\n",
                        be32_to_cpu(data->tlpHdr1),
                        be32_to_cpu(data->tlpHdr2),
                        be32_to_cpu(data->tlpHdr3),
                        be32_to_cpu(data->tlpHdr4));
        if (data->sourceId || data->errorClass ||
            data->correlator)
                pr_info("RootErrLog1: %08x %016llx %016llx\n",
                        be32_to_cpu(data->sourceId),
                        be64_to_cpu(data->errorClass),
                        be64_to_cpu(data->correlator));
        if (data->nFir)
                pr_info("nFir: %016llx %016llx %016llx\n",
                        be64_to_cpu(data->nFir),
                        be64_to_cpu(data->nFirMask),
                        be64_to_cpu(data->nFirWOF));
        if (data->phbPlssr || data->phbCsr)
                pr_info("PhbSts: %016llx %016llx\n",
                        be64_to_cpu(data->phbPlssr),
                        be64_to_cpu(data->phbCsr));
        if (data->lemFir)
                pr_info("Lem: %016llx %016llx %016llx\n",
                        be64_to_cpu(data->lemFir),
                        be64_to_cpu(data->lemErrorMask),
                        be64_to_cpu(data->lemWOF));
        if (data->phbErrorStatus)
                pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
                        be64_to_cpu(data->phbErrorStatus),
                        be64_to_cpu(data->phbFirstErrorStatus),
                        be64_to_cpu(data->phbErrorLog0),
                        be64_to_cpu(data->phbErrorLog1));
        if (data->mmioErrorStatus)
                pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
                        be64_to_cpu(data->mmioErrorStatus),
                        be64_to_cpu(data->mmioFirstErrorStatus),
                        be64_to_cpu(data->mmioErrorLog0),
                        be64_to_cpu(data->mmioErrorLog1));
        if (data->dma0ErrorStatus)
                pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
                        be64_to_cpu(data->dma0ErrorStatus),
                        be64_to_cpu(data->dma0FirstErrorStatus),
                        be64_to_cpu(data->dma0ErrorLog0),
                        be64_to_cpu(data->dma0ErrorLog1));
        if (data->dma1ErrorStatus)
                pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
                        be64_to_cpu(data->dma1ErrorStatus),
                        be64_to_cpu(data->dma1FirstErrorStatus),
                        be64_to_cpu(data->dma1ErrorLog0),
                        be64_to_cpu(data->dma1ErrorLog1));

        for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
                if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
                    (be64_to_cpu(data->pestB[i]) >> 63) == 0)
                        continue;

                pr_info("PE[%3d] A/B: %016llx %016llx\n",
                        i, be64_to_cpu(data->pestA[i]),
                        be64_to_cpu(data->pestB[i]));
        }
}
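
/*
 * Entry point used by the EEH code: the log buffer is the raw blob
 * returned by OPAL (see pnv_pci_handle_eeh_config() below); its common
 * header tells us which layout-specific dumper to dispatch to.
 */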
void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
                                unsigned char *log_buff)
{
        struct OpalIoPhbErrorCommon *common;

        if (!hose || !log_buff)
                return;

        common = (struct OpalIoPhbErrorCommon *)log_buff;
        switch (be32_to_cpu(common->ioType)) {
        case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
                pnv_pci_dump_p7ioc_diag_data(hose, common);
                break;
        case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
                pnv_pci_dump_phb3_diag_data(hose, common);
                break;
        default:
                pr_warn("%s: Unrecognized ioType %d\n",
                        __func__, be32_to_cpu(common->ioType));
        }
}
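
/*
 * A config access has found a frozen PE: fetch the diag data first,
 * while the error state is still intact, then clear the freeze. For
 * now the diag buffer is only dumped when the clear fails.
 */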
static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
{
        unsigned long flags, rc;
        int has_diag, ret = 0;

        spin_lock_irqsave(&phb->lock, flags);

        /* Fetch PHB diag-data */
        rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
                                         PNV_PCI_DIAG_BUF_SIZE);
        has_diag = (rc == OPAL_SUCCESS);

        /* If the PHB supports compound PEs, let it handle the unfreeze */
        if (phb->unfreeze_pe) {
                ret = phb->unfreeze_pe(phb,
                                       pe_no,
                                       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
        } else {
                rc = opal_pci_eeh_freeze_clear(phb->opal_id,
                                               pe_no,
                                               OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
                if (rc) {
                        pr_warn("%s: Failure %ld clearing frozen PHB#%x-PE#%x\n",
                                __func__, rc, phb->hose->global_number,
                                pe_no);
                        ret = -EIO;
                }
        }

        /*
         * For now, let's only display the diag buffer when we fail to clear
         * the EEH status. We'll do more sensible things later when we have
         * proper EEH support. We need to make sure we don't pollute ourselves
         * with the normal errors generated when probing empty slots.
         */
        if (has_diag && ret)
                pnv_pci_dump_phb_diag_data(phb->hose, phb->diag.blob);

        spin_unlock_irqrestore(&phb->lock, flags);
}
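
/*
 * After a config access, query the freeze state of the PE owning the
 * device; if it is frozen for MMIO and/or DMA, clear the freeze and
 * dump diagnostics via pnv_pci_handle_eeh_config() above.
 */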
static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
{
        struct pnv_phb *phb = pdn->phb->private_data;
        u8 fstate;
        __be16 pcierr;
        int pe_no;
        s64 rc;

        /*
         * Get the PE#. During the PCI probe stage, we might not have set
         * it up yet, so map all ER errors to the reserved PE.
         */
        pe_no = pdn->pe_number;
        if (pe_no == IODA_INVALID_PE) {
                if (phb->type == PNV_PHB_P5IOC2)
                        pe_no = 0;
                else
                        pe_no = phb->ioda.reserved_pe;
        }

        /*
         * Fetch the frozen state. If the PHB supports compound PEs,
         * we need to handle that case as well.
         */
        if (phb->get_pe_state) {
                fstate = phb->get_pe_state(phb, pe_no);
        } else {
                rc = opal_pci_eeh_freeze_status(phb->opal_id,
                                                pe_no,
                                                &fstate,
                                                &pcierr,
                                                NULL);
                if (rc) {
                        pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
                                __func__, rc, phb->hose->global_number, pe_no);
                        return;
                }
        }

        cfg_dbg(" -> EEH check, bdfn=%04x PE#%d fstate=%x\n",
                (pdn->busno << 8) | (pdn->devfn), pe_no, fstate);

        /* Clear the frozen state if applicable */
        if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE ||
            fstate == OPAL_EEH_STOPPED_DMA_FREEZE ||
            fstate == OPAL_EEH_STOPPED_MMIO_DMA_FREEZE) {
                /*
                 * If the PHB supports compound PEs, freeze the PE for
                 * consistency.
                 */
                if (phb->freeze_pe)
                        phb->freeze_pe(phb, pe_no);

                pnv_pci_handle_eeh_config(phb, pe_no);
        }
}
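
/*
 * Raw config-space accessors backed by OPAL calls. On an OPAL error,
 * a read returns all-ones for the requested width (as a master abort
 * would on conventional PCI) but still reports PCIBIOS_SUCCESSFUL;
 * only an unsupported access width fails.
 */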
int pnv_pci_cfg_read(struct pci_dn *pdn,
                     int where, int size, u32 *val)
{
        struct pnv_phb *phb = pdn->phb->private_data;
        u32 bdfn = (pdn->busno << 8) | pdn->devfn;
        s64 rc;

        switch (size) {
        case 1: {
                u8 v8;

                rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8);
                *val = (rc == OPAL_SUCCESS) ? v8 : 0xff;
                break;
        }
        case 2: {
                __be16 v16;

                rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
                                                    &v16);
                *val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff;
                break;
        }
        case 4: {
                __be32 v32;

                rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
                *val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff;
                break;
        }
        default:
                return PCIBIOS_FUNC_NOT_SUPPORTED;
        }

        cfg_dbg("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
                __func__, pdn->busno, pdn->devfn, where, size, *val);
        return PCIBIOS_SUCCESSFUL;
}

int pnv_pci_cfg_write(struct pci_dn *pdn,
                      int where, int size, u32 val)
{
        struct pnv_phb *phb = pdn->phb->private_data;
        u32 bdfn = (pdn->busno << 8) | pdn->devfn;

        cfg_dbg("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
                __func__, pdn->busno, pdn->devfn, where, size, val);
        switch (size) {
        case 1:
                opal_pci_config_write_byte(phb->opal_id, bdfn, where, val);
                break;
        case 2:
                opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val);
                break;
        case 4:
                opal_pci_config_write_word(phb->opal_id, bdfn, where, val);
                break;
        default:
                return PCIBIOS_FUNC_NOT_SUPPORTED;
        }

        return PCIBIOS_SUCCESSFUL;
}
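
/*
 * Config space must not be touched while a PE reset is in progress
 * (EEH_PE_CFG_BLOCKED) or once the device has been marked removed;
 * in those cases the access is failed rather than forwarded to OPAL.
 */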
#ifdef CONFIG_EEH
static bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
        struct eeh_dev *edev = NULL;
        struct pnv_phb *phb = pdn->phb->private_data;

        /* EEH not enabled ? */
        if (!(phb->flags & PNV_PHB_FLAG_EEH))
                return true;

        /* PE reset or device removed ? */
        edev = pdn->edev;
        if (edev) {
                if (edev->pe &&
                    (edev->pe->state & EEH_PE_CFG_BLOCKED))
                        return false;

                if (edev->mode & EEH_DEV_REMOVED)
                        return false;
        }

        return true;
}
#else
static inline bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
        return true;
}
#endif /* CONFIG_EEH */
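
/*
 * pci_ops wrappers: translate (bus, devfn) into a pci_dn, honour the
 * blocking rules above, and, when full EEH is active, let
 * eeh_dev_check_failure() decide whether an all-ones read value really
 * indicates a frozen PE. Without full EEH we fall back to the manual
 * freeze check instead.
 */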
static int pnv_pci_read_config(struct pci_bus *bus,
                               unsigned int devfn,
                               int where, int size, u32 *val)
{
        struct pci_dn *pdn;
        struct pnv_phb *phb;
        int ret;

        *val = 0xFFFFFFFF;
        pdn = pci_get_pdn_by_devfn(bus, devfn);
        if (!pdn)
                return PCIBIOS_DEVICE_NOT_FOUND;

        if (!pnv_pci_cfg_check(pdn))
                return PCIBIOS_DEVICE_NOT_FOUND;

        ret = pnv_pci_cfg_read(pdn, where, size, val);
        phb = pdn->phb->private_data;
        if (phb->flags & PNV_PHB_FLAG_EEH && pdn->edev) {
                if (*val == EEH_IO_ERROR_VALUE(size) &&
                    eeh_dev_check_failure(pdn->edev))
                        return PCIBIOS_DEVICE_NOT_FOUND;
        } else {
                pnv_pci_config_check_eeh(pdn);
        }

        return ret;
}

static int pnv_pci_write_config(struct pci_bus *bus,
                                unsigned int devfn,
                                int where, int size, u32 val)
{
        struct pci_dn *pdn;
        struct pnv_phb *phb;
        int ret;

        pdn = pci_get_pdn_by_devfn(bus, devfn);
        if (!pdn)
                return PCIBIOS_DEVICE_NOT_FOUND;

        if (!pnv_pci_cfg_check(pdn))
                return PCIBIOS_DEVICE_NOT_FOUND;

        ret = pnv_pci_cfg_write(pdn, where, size, val);
        phb = pdn->phb->private_data;
        if (!(phb->flags & PNV_PHB_FLAG_EEH))
                pnv_pci_config_check_eeh(pdn);

        return ret;
}

struct pci_ops pnv_pci_ops = {
        .read  = pnv_pci_read_config,
        .write = pnv_pci_write_config,
};
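
/*
 * TCE handling. Each 64-bit TCE holds a real page number shifted into
 * place plus read/write permission bits: a DMA_TO_DEVICE mapping only
 * grants TCE_PCI_READ (the device reads memory), anything else also
 * grants TCE_PCI_WRITE. The "rm" flag selects the real-mode variant
 * of the invalidation path.
 */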
static int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
                         unsigned long uaddr, enum dma_data_direction direction,
                         struct dma_attrs *attrs, bool rm)
{
        u64 proto_tce;
        __be64 *tcep, *tces;
        u64 rpn;

        proto_tce = TCE_PCI_READ; // Read allowed

        if (direction != DMA_TO_DEVICE)
                proto_tce |= TCE_PCI_WRITE;

        tces = tcep = ((__be64 *)tbl->it_base) + index - tbl->it_offset;
        rpn = __pa(uaddr) >> tbl->it_page_shift;

        while (npages--)
                *(tcep++) = cpu_to_be64(proto_tce |
                                (rpn++ << tbl->it_page_shift));

        /* Some implementations won't cache invalid TCEs and thus may not
         * need that flush. We'll probably turn it_type into a bit mask
         * of flags if that becomes the case
         */
        if (tbl->it_type & TCE_PCI_SWINV_CREATE)
                pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1, rm);

        return 0;
}

static int pnv_tce_build_vm(struct iommu_table *tbl, long index, long npages,
                            unsigned long uaddr,
                            enum dma_data_direction direction,
                            struct dma_attrs *attrs)
{
        return pnv_tce_build(tbl, index, npages, uaddr, direction, attrs,
                             false);
}

static void pnv_tce_free(struct iommu_table *tbl, long index, long npages,
                         bool rm)
{
        __be64 *tcep, *tces;

        tces = tcep = ((__be64 *)tbl->it_base) + index - tbl->it_offset;

        while (npages--)
                *(tcep++) = cpu_to_be64(0);

        if (tbl->it_type & TCE_PCI_SWINV_FREE)
                pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1, rm);
}

static void pnv_tce_free_vm(struct iommu_table *tbl, long index, long npages)
{
        pnv_tce_free(tbl, index, npages, false);
}

static unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
{
        return ((u64 *)tbl->it_base)[index - tbl->it_offset];
}

static int pnv_tce_build_rm(struct iommu_table *tbl, long index, long npages,
                            unsigned long uaddr,
                            enum dma_data_direction direction,
                            struct dma_attrs *attrs)
{
        return pnv_tce_build(tbl, index, npages, uaddr, direction, attrs, true);
}

static void pnv_tce_free_rm(struct iommu_table *tbl, long index, long npages)
{
        pnv_tce_free(tbl, index, npages, true);
}
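
/*
 * Initialize the generic iommu_table fields from a TCE buffer: one
 * 8-byte TCE per IOMMU page (hence it_size = tce_size >> 3), with
 * it_offset expressing the DMA offset in units of IOMMU pages.
 *
 * Illustrative call only (variable names are hypothetical, not from
 * this file): a PHB setup path with a TCE table covering a window at
 * DMA address 0 using 4K IOMMU pages might do:
 *
 *	pnv_pci_setup_iommu_table(tbl, tce_mem, tce_size, 0,
 *				  IOMMU_PAGE_SHIFT_4K);
 *
 * where tce_mem/tce_size describe TCE memory it already allocated.
 */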
void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
                               void *tce_mem, u64 tce_size,
                               u64 dma_offset, unsigned page_shift)
{
        tbl->it_blocksize = 16;
        tbl->it_base = (unsigned long)tce_mem;
        tbl->it_page_shift = page_shift;
        tbl->it_offset = dma_offset >> tbl->it_page_shift;
        tbl->it_index = 0;
        tbl->it_size = tce_size >> 3;
        tbl->it_busno = 0;
        tbl->it_type = TCE_PCI;
}
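
/*
 * Per-device DMA setup hook. SR-IOV virtual functions get their pci_dn
 * created before a PE is assigned, so the VF's PE number is filled in
 * here by matching its RID against the PHB's PE list; the PHB-specific
 * hook then does the actual DMA configuration.
 */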
static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
{
        struct pci_controller *hose = pci_bus_to_host(pdev->bus);
        struct pnv_phb *phb = hose->private_data;
#ifdef CONFIG_PCI_IOV
        struct pnv_ioda_pe *pe;
        struct pci_dn *pdn;

        /* Fix the VF pdn PE number */
        if (pdev->is_virtfn) {
                pdn = pci_get_pdn(pdev);
                WARN_ON(pdn->pe_number != IODA_INVALID_PE);
                list_for_each_entry(pe, &phb->ioda.pe_list, list) {
                        if (pe->rid == ((pdev->bus->number << 8) |
                            (pdev->devfn & 0xff))) {
                                pdn->pe_number = pe->pe_number;
                                pe->pdev = pdev;
                                break;
                        }
                }
        }
#endif /* CONFIG_PCI_IOV */

        if (phb && phb->dma_dev_setup)
                phb->dma_dev_setup(phb, pdev);
}

int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
{
        struct pci_controller *hose = pci_bus_to_host(pdev->bus);
        struct pnv_phb *phb = hose->private_data;

        if (phb && phb->dma_set_mask)
                return phb->dma_set_mask(phb, pdev, dma_mask);
        return __dma_set_mask(&pdev->dev, dma_mask);
}

u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev)
{
        struct pci_controller *hose = pci_bus_to_host(pdev->bus);
        struct pnv_phb *phb = hose->private_data;

        if (phb && phb->dma_get_required_mask)
                return phb->dma_get_required_mask(phb, pdev);

        return __dma_get_required_mask(&pdev->dev);
}
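
/*
 * Walk all PHBs and give each a chance to quiesce its hardware via
 * its shutdown() hook (used on the platform shutdown/kexec path).
 */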
void pnv_pci_shutdown(void)
{
        struct pci_controller *hose;

        list_for_each_entry(hose, &hose_list, list_node) {
                struct pnv_phb *phb = hose->private_data;

                if (phb && phb->shutdown)
                        phb->shutdown(phb);
        }
}

/* Fixup wrong class code in p7ioc and p8 root complex */
static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
{
        dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);
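
/*
 * Platform entry point: probe IODA hubs first (falling back to p5ioc2
 * only if none are found, since the two cannot be mixed), then any
 * IODA2 PHB3s, then wire up the TCE callbacks in ppc_md and register
 * the MSI ops.
 */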
void __init pnv_pci_init(void)
{
        struct device_node *np;
        bool found_ioda = false;

        pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);

        /* If we don't have OPAL, e.g. in a simulator, just skip PCI probe */
        if (!firmware_has_feature(FW_FEATURE_OPAL))
                return;

        /* Look for IODA IO-Hubs. We don't support mixing IODA
         * and p5ioc2 due to the need to change some global
         * probing flags
         */
        for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
                pnv_pci_init_ioda_hub(np);
                found_ioda = true;
        }

        /* Look for p5ioc2 IO-Hubs */
        if (!found_ioda)
                for_each_compatible_node(np, NULL, "ibm,p5ioc2")
                        pnv_pci_init_p5ioc2_hub(np);

        /* Look for ioda2 built-in PHB3's */
        for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
                pnv_pci_init_ioda2_phb(np);

        /* Setup the linkage between OF nodes and PHBs */
        pci_devs_phb_init();

        /* Configure IOMMU DMA hooks */
        ppc_md.tce_build = pnv_tce_build_vm;
        ppc_md.tce_free = pnv_tce_free_vm;
        ppc_md.tce_build_rm = pnv_tce_build_rm;
        ppc_md.tce_free_rm = pnv_tce_free_rm;
        ppc_md.tce_get = pnv_tce_get;
        set_pci_dma_ops(&dma_iommu_ops);

        /* Configure MSIs */
#ifdef CONFIG_PCI_MSI
        ppc_md.setup_msi_irqs = pnv_setup_msi_irqs;
        ppc_md.teardown_msi_irqs = pnv_teardown_msi_irqs;
#endif
}

machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init);

struct pci_controller_ops pnv_pci_controller_ops = {
        .dma_dev_setup = pnv_pci_dma_dev_setup,
};