pci.c

/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Currently supports only P5IOC2
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/iommu.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/firmware.h>
#include <asm/eeh_event.h>
#include <asm/eeh.h>

#include "powernv.h"
#include "pci.h"

/* Delay in usec */
#define PCI_RESET_DELAY_US      3000000

#define cfg_dbg(fmt...) do { } while (0)
//#define cfg_dbg(fmt...) printk(fmt)
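
/*
 * MSI support.  Hardware MSIs come out of the PHB's MSI bitmap; each
 * one is mapped to a Linux virq and handed to the PHB-specific
 * msi_setup() hook, which composes the MSI message for the device.
 */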
#ifdef CONFIG_PCI_MSI
int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
        struct pci_controller *hose = pci_bus_to_host(pdev->bus);
        struct pnv_phb *phb = hose->private_data;
        struct msi_desc *entry;
        struct msi_msg msg;
        int hwirq;
        unsigned int virq;
        int rc;

        if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
                return -ENODEV;

        if (pdev->no_64bit_msi && !phb->msi32_support)
                return -ENODEV;

        list_for_each_entry(entry, &pdev->msi_list, list) {
                if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
                        pr_warn("%s: Supports only 64-bit MSIs\n",
                                pci_name(pdev));
                        return -ENXIO;
                }
                hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1);
                if (hwirq < 0) {
                        pr_warn("%s: Failed to find a free MSI\n",
                                pci_name(pdev));
                        return -ENOSPC;
                }
                virq = irq_create_mapping(NULL, phb->msi_base + hwirq);
                if (virq == NO_IRQ) {
                        pr_warn("%s: Failed to map MSI to linux irq\n",
                                pci_name(pdev));
                        msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
                        return -ENOMEM;
                }
                rc = phb->msi_setup(phb, pdev, phb->msi_base + hwirq,
                                    virq, entry->msi_attrib.is_64, &msg);
                if (rc) {
                        pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
                        irq_dispose_mapping(virq);
                        msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
                        return rc;
                }
                irq_set_msi_desc(virq, entry);
                pci_write_msi_msg(virq, &msg);
        }
        return 0;
}
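
/*
 * Undo pnv_setup_msi_irqs(): detach each descriptor from its virq,
 * return the hardware IRQ to the bitmap and dispose of the mapping.
 */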
void pnv_teardown_msi_irqs(struct pci_dev *pdev)
{
        struct pci_controller *hose = pci_bus_to_host(pdev->bus);
        struct pnv_phb *phb = hose->private_data;
        struct msi_desc *entry;

        if (WARN_ON(!phb))
                return;

        list_for_each_entry(entry, &pdev->msi_list, list) {
                if (entry->irq == NO_IRQ)
                        continue;
                irq_set_msi_desc(entry->irq, NULL);
                msi_bitmap_free_hwirqs(&phb->msi_bmp,
                        virq_to_hw(entry->irq) - phb->msi_base, 1);
                irq_dispose_mapping(entry->irq);
        }
}
#endif /* CONFIG_PCI_MSI */
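
/*
 * Dump the P7IOC flavour of PHB diag-data.  All fields arrive from
 * firmware in big-endian form; only non-zero registers are printed.
 */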
static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
                                         struct OpalIoPhbErrorCommon *common)
{
        struct OpalIoP7IOCPhbErrorData *data;
        int i;

        data = (struct OpalIoP7IOCPhbErrorData *)common;
        pr_info("P7IOC PHB#%d Diag-data (Version: %d)\n",
                hose->global_number, be32_to_cpu(common->version));

        if (data->brdgCtl)
                pr_info("brdgCtl: %08x\n",
                        be32_to_cpu(data->brdgCtl));
        if (data->portStatusReg || data->rootCmplxStatus ||
            data->busAgentStatus)
                pr_info("UtlSts: %08x %08x %08x\n",
                        be32_to_cpu(data->portStatusReg),
                        be32_to_cpu(data->rootCmplxStatus),
                        be32_to_cpu(data->busAgentStatus));
        if (data->deviceStatus || data->slotStatus ||
            data->linkStatus || data->devCmdStatus ||
            data->devSecStatus)
                pr_info("RootSts: %08x %08x %08x %08x %08x\n",
                        be32_to_cpu(data->deviceStatus),
                        be32_to_cpu(data->slotStatus),
                        be32_to_cpu(data->linkStatus),
                        be32_to_cpu(data->devCmdStatus),
                        be32_to_cpu(data->devSecStatus));
        if (data->rootErrorStatus || data->uncorrErrorStatus ||
            data->corrErrorStatus)
                pr_info("RootErrSts: %08x %08x %08x\n",
                        be32_to_cpu(data->rootErrorStatus),
                        be32_to_cpu(data->uncorrErrorStatus),
                        be32_to_cpu(data->corrErrorStatus));
        if (data->tlpHdr1 || data->tlpHdr2 ||
            data->tlpHdr3 || data->tlpHdr4)
                pr_info("RootErrLog: %08x %08x %08x %08x\n",
                        be32_to_cpu(data->tlpHdr1),
                        be32_to_cpu(data->tlpHdr2),
                        be32_to_cpu(data->tlpHdr3),
                        be32_to_cpu(data->tlpHdr4));
        if (data->sourceId || data->errorClass ||
            data->correlator)
                pr_info("RootErrLog1: %08x %016llx %016llx\n",
                        be32_to_cpu(data->sourceId),
                        be64_to_cpu(data->errorClass),
                        be64_to_cpu(data->correlator));
        if (data->p7iocPlssr || data->p7iocCsr)
                pr_info("PhbSts: %016llx %016llx\n",
                        be64_to_cpu(data->p7iocPlssr),
                        be64_to_cpu(data->p7iocCsr));
        if (data->lemFir)
                pr_info("Lem: %016llx %016llx %016llx\n",
                        be64_to_cpu(data->lemFir),
                        be64_to_cpu(data->lemErrorMask),
                        be64_to_cpu(data->lemWOF));
        if (data->phbErrorStatus)
                pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
                        be64_to_cpu(data->phbErrorStatus),
                        be64_to_cpu(data->phbFirstErrorStatus),
                        be64_to_cpu(data->phbErrorLog0),
                        be64_to_cpu(data->phbErrorLog1));
        if (data->mmioErrorStatus)
                pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
                        be64_to_cpu(data->mmioErrorStatus),
                        be64_to_cpu(data->mmioFirstErrorStatus),
                        be64_to_cpu(data->mmioErrorLog0),
                        be64_to_cpu(data->mmioErrorLog1));
        if (data->dma0ErrorStatus)
                pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
                        be64_to_cpu(data->dma0ErrorStatus),
                        be64_to_cpu(data->dma0FirstErrorStatus),
                        be64_to_cpu(data->dma0ErrorLog0),
                        be64_to_cpu(data->dma0ErrorLog1));
        if (data->dma1ErrorStatus)
                pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
                        be64_to_cpu(data->dma1ErrorStatus),
                        be64_to_cpu(data->dma1FirstErrorStatus),
                        be64_to_cpu(data->dma1ErrorLog0),
                        be64_to_cpu(data->dma1ErrorLog1));

        for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
                /* Match the PHB3 path: convert from BE before testing bit 63 */
                if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
                    (be64_to_cpu(data->pestB[i]) >> 63) == 0)
                        continue;

                pr_info("PE[%3d] A/B: %016llx %016llx\n",
                        i, be64_to_cpu(data->pestA[i]),
                        be64_to_cpu(data->pestB[i]));
        }
}

static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
                                        struct OpalIoPhbErrorCommon *common)
{
        struct OpalIoPhb3ErrorData *data;
        int i;

        data = (struct OpalIoPhb3ErrorData *)common;
        pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n",
                hose->global_number, be32_to_cpu(common->version));

        if (data->brdgCtl)
                pr_info("brdgCtl: %08x\n",
                        be32_to_cpu(data->brdgCtl));
        if (data->portStatusReg || data->rootCmplxStatus ||
            data->busAgentStatus)
                pr_info("UtlSts: %08x %08x %08x\n",
                        be32_to_cpu(data->portStatusReg),
                        be32_to_cpu(data->rootCmplxStatus),
                        be32_to_cpu(data->busAgentStatus));
        if (data->deviceStatus || data->slotStatus ||
            data->linkStatus || data->devCmdStatus ||
            data->devSecStatus)
                pr_info("RootSts: %08x %08x %08x %08x %08x\n",
                        be32_to_cpu(data->deviceStatus),
                        be32_to_cpu(data->slotStatus),
                        be32_to_cpu(data->linkStatus),
                        be32_to_cpu(data->devCmdStatus),
                        be32_to_cpu(data->devSecStatus));
        if (data->rootErrorStatus || data->uncorrErrorStatus ||
            data->corrErrorStatus)
                pr_info("RootErrSts: %08x %08x %08x\n",
                        be32_to_cpu(data->rootErrorStatus),
                        be32_to_cpu(data->uncorrErrorStatus),
                        be32_to_cpu(data->corrErrorStatus));
        if (data->tlpHdr1 || data->tlpHdr2 ||
            data->tlpHdr3 || data->tlpHdr4)
                pr_info("RootErrLog: %08x %08x %08x %08x\n",
                        be32_to_cpu(data->tlpHdr1),
                        be32_to_cpu(data->tlpHdr2),
                        be32_to_cpu(data->tlpHdr3),
                        be32_to_cpu(data->tlpHdr4));
        if (data->sourceId || data->errorClass ||
            data->correlator)
                pr_info("RootErrLog1: %08x %016llx %016llx\n",
                        be32_to_cpu(data->sourceId),
                        be64_to_cpu(data->errorClass),
                        be64_to_cpu(data->correlator));
        if (data->nFir)
                pr_info("nFir: %016llx %016llx %016llx\n",
                        be64_to_cpu(data->nFir),
                        be64_to_cpu(data->nFirMask),
                        be64_to_cpu(data->nFirWOF));
        if (data->phbPlssr || data->phbCsr)
                pr_info("PhbSts: %016llx %016llx\n",
                        be64_to_cpu(data->phbPlssr),
                        be64_to_cpu(data->phbCsr));
        if (data->lemFir)
                pr_info("Lem: %016llx %016llx %016llx\n",
                        be64_to_cpu(data->lemFir),
                        be64_to_cpu(data->lemErrorMask),
                        be64_to_cpu(data->lemWOF));
        if (data->phbErrorStatus)
                pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
                        be64_to_cpu(data->phbErrorStatus),
                        be64_to_cpu(data->phbFirstErrorStatus),
                        be64_to_cpu(data->phbErrorLog0),
                        be64_to_cpu(data->phbErrorLog1));
        if (data->mmioErrorStatus)
                pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
                        be64_to_cpu(data->mmioErrorStatus),
                        be64_to_cpu(data->mmioFirstErrorStatus),
                        be64_to_cpu(data->mmioErrorLog0),
                        be64_to_cpu(data->mmioErrorLog1));
        if (data->dma0ErrorStatus)
                pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
                        be64_to_cpu(data->dma0ErrorStatus),
                        be64_to_cpu(data->dma0FirstErrorStatus),
                        be64_to_cpu(data->dma0ErrorLog0),
                        be64_to_cpu(data->dma0ErrorLog1));
        if (data->dma1ErrorStatus)
                pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
                        be64_to_cpu(data->dma1ErrorStatus),
                        be64_to_cpu(data->dma1FirstErrorStatus),
                        be64_to_cpu(data->dma1ErrorLog0),
                        be64_to_cpu(data->dma1ErrorLog1));

        for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
                if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
                    (be64_to_cpu(data->pestB[i]) >> 63) == 0)
                        continue;

                pr_info("PE[%3d] A/B: %016llx %016llx\n",
                        i, be64_to_cpu(data->pestA[i]),
                        be64_to_cpu(data->pestB[i]));
        }
}
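
/*
 * Dispatch on the ioType tag at the head of the diag-data blob to the
 * matching per-chip dump routine above.
 */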
void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
                                unsigned char *log_buff)
{
        struct OpalIoPhbErrorCommon *common;

        if (!hose || !log_buff)
                return;

        common = (struct OpalIoPhbErrorCommon *)log_buff;
        switch (be32_to_cpu(common->ioType)) {
        case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
                pnv_pci_dump_p7ioc_diag_data(hose, common);
                break;
        case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
                pnv_pci_dump_phb3_diag_data(hose, common);
                break;
        default:
                pr_warn("%s: Unrecognized ioType %d\n",
                        __func__, be32_to_cpu(common->ioType));
        }
}
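
/*
 * Fetch the PHB diag-data and clear the frozen state of @pe_no, either
 * via the PHB's unfreeze_pe() hook (compound PEs) or directly through
 * OPAL.  The diag buffer is only dumped if the clear fails.
 */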
static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
{
        unsigned long flags, rc;
        int has_diag, ret = 0;

        spin_lock_irqsave(&phb->lock, flags);

        /* Fetch PHB diag-data */
        rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
                                         PNV_PCI_DIAG_BUF_SIZE);
        has_diag = (rc == OPAL_SUCCESS);

        /* If the PHB supports compound PEs, let its hook handle the unfreeze */
        if (phb->unfreeze_pe) {
                ret = phb->unfreeze_pe(phb,
                                       pe_no,
                                       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
        } else {
                rc = opal_pci_eeh_freeze_clear(phb->opal_id,
                                               pe_no,
                                               OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
                if (rc) {
                        pr_warn("%s: Failure %ld clearing frozen PHB#%x-PE#%x\n",
                                __func__, rc, phb->hose->global_number,
                                pe_no);
                        ret = -EIO;
                }
        }

        /*
         * For now, let's only display the diag buffer when we fail to clear
         * the EEH status.  We'll do more sensible things later when we have
         * proper EEH support.  We need to make sure we don't pollute ourselves
         * with the normal errors generated when probing empty slots.
         */
        if (has_diag && ret)
                pnv_pci_dump_phb_diag_data(phb->hose, phb->diag.blob);

        spin_unlock_irqrestore(&phb->lock, flags);
}
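
/*
 * Called on config space accesses: look up the PE's frozen state and,
 * if it is MMIO and/or DMA frozen, kick off the clear sequence above.
 */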
static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
{
        struct pnv_phb *phb = pdn->phb->private_data;
        u8 fstate;
        __be16 pcierr;
        int pe_no;
        s64 rc;

        /*
         * Get the PE#.  During the PCI probe stage, we might not have
         * set that up yet, so all ER errors are mapped to the
         * reserved PE.
         */
        pe_no = pdn->pe_number;
        if (pe_no == IODA_INVALID_PE) {
                if (phb->type == PNV_PHB_P5IOC2)
                        pe_no = 0;
                else
                        pe_no = phb->ioda.reserved_pe;
        }

        /*
         * Fetch the frozen state.  If the PHB supports compound PEs,
         * we need to handle that case via its get_pe_state() hook.
         */
        if (phb->get_pe_state) {
                fstate = phb->get_pe_state(phb, pe_no);
        } else {
                rc = opal_pci_eeh_freeze_status(phb->opal_id,
                                                pe_no,
                                                &fstate,
                                                &pcierr,
                                                NULL);
                if (rc) {
                        pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
                                __func__, rc, phb->hose->global_number, pe_no);
                        return;
                }
        }

        cfg_dbg(" -> EEH check, bdfn=%04x PE#%d fstate=%x\n",
                (pdn->busno << 8) | (pdn->devfn), pe_no, fstate);

        /* Clear the frozen state if applicable */
        if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE ||
            fstate == OPAL_EEH_STOPPED_DMA_FREEZE  ||
            fstate == OPAL_EEH_STOPPED_MMIO_DMA_FREEZE) {
                /*
                 * If the PHB supports compound PEs, freeze it
                 * for consistency.
                 */
                if (phb->freeze_pe)
                        phb->freeze_pe(phb, pe_no);

                pnv_pci_handle_eeh_config(phb, pe_no);
        }
}
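
/*
 * Low-level config space accessors.  Reads go through OPAL; on failure
 * the returned value is forced to all-ones so EEH can spot the error.
 */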
int pnv_pci_cfg_read(struct pci_dn *pdn,
                     int where, int size, u32 *val)
{
        struct pnv_phb *phb = pdn->phb->private_data;
        u32 bdfn = (pdn->busno << 8) | pdn->devfn;
        s64 rc;

        switch (size) {
        case 1: {
                u8 v8;

                rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8);
                *val = (rc == OPAL_SUCCESS) ? v8 : 0xff;
                break;
        }
        case 2: {
                __be16 v16;

                rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
                                                    &v16);
                *val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff;
                break;
        }
        case 4: {
                __be32 v32;

                rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
                *val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff;
                break;
        }
        default:
                return PCIBIOS_FUNC_NOT_SUPPORTED;
        }

        cfg_dbg("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
                __func__, pdn->busno, pdn->devfn, where, size, *val);
        return PCIBIOS_SUCCESSFUL;
}
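
/*
 * Write side of the OPAL config accessors; the size dispatch mirrors
 * the read path above.
 */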
int pnv_pci_cfg_write(struct pci_dn *pdn,
                      int where, int size, u32 val)
{
        struct pnv_phb *phb = pdn->phb->private_data;
        u32 bdfn = (pdn->busno << 8) | pdn->devfn;

        cfg_dbg("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
                __func__, pdn->busno, pdn->devfn, where, size, val);
        switch (size) {
        case 1:
                opal_pci_config_write_byte(phb->opal_id, bdfn, where, val);
                break;
        case 2:
                opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val);
                break;
        case 4:
                opal_pci_config_write_word(phb->opal_id, bdfn, where, val);
                break;
        default:
                return PCIBIOS_FUNC_NOT_SUPPORTED;
        }

        return PCIBIOS_SUCCESSFUL;
}
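
/*
 * Decide whether a config access may proceed: block it while the PE is
 * being reset (EEH_PE_CFG_BLOCKED) or after the device was removed.
 */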
#ifdef CONFIG_EEH
static bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
        struct eeh_dev *edev = NULL;
        struct pnv_phb *phb = pdn->phb->private_data;

        /* EEH not enabled ? */
        if (!(phb->flags & PNV_PHB_FLAG_EEH))
                return true;

        /* PE reset or device removed ? */
        edev = pdn->edev;
        if (edev) {
                if (edev->pe &&
                    (edev->pe->state & EEH_PE_CFG_BLOCKED))
                        return false;

                if (edev->mode & EEH_DEV_REMOVED)
                        return false;
        }

        return true;
}
#else
static inline bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
        return true;
}
#endif /* CONFIG_EEH */
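
/*
 * pci_ops entry points.  On EEH-aware PHBs a read returning the
 * all-ones error value triggers eeh_dev_check_failure(); otherwise we
 * fall back to probing the PE's frozen state by hand.
 */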
static int pnv_pci_read_config(struct pci_bus *bus,
                               unsigned int devfn,
                               int where, int size, u32 *val)
{
        struct pci_dn *pdn;
        struct pnv_phb *phb;
        int ret;

        *val = 0xFFFFFFFF;
        pdn = pci_get_pdn_by_devfn(bus, devfn);
        if (!pdn)
                return PCIBIOS_DEVICE_NOT_FOUND;

        if (!pnv_pci_cfg_check(pdn))
                return PCIBIOS_DEVICE_NOT_FOUND;

        ret = pnv_pci_cfg_read(pdn, where, size, val);
        phb = pdn->phb->private_data;
        if (phb->flags & PNV_PHB_FLAG_EEH && pdn->edev) {
                if (*val == EEH_IO_ERROR_VALUE(size) &&
                    eeh_dev_check_failure(pdn->edev))
                        return PCIBIOS_DEVICE_NOT_FOUND;
        } else {
                pnv_pci_config_check_eeh(pdn);
        }

        return ret;
}

static int pnv_pci_write_config(struct pci_bus *bus,
                                unsigned int devfn,
                                int where, int size, u32 val)
{
        struct pci_dn *pdn;
        struct pnv_phb *phb;
        int ret;

        pdn = pci_get_pdn_by_devfn(bus, devfn);
        if (!pdn)
                return PCIBIOS_DEVICE_NOT_FOUND;

        if (!pnv_pci_cfg_check(pdn))
                return PCIBIOS_DEVICE_NOT_FOUND;

        ret = pnv_pci_cfg_write(pdn, where, size, val);
        phb = pdn->phb->private_data;
        if (!(phb->flags & PNV_PHB_FLAG_EEH))
                pnv_pci_config_check_eeh(pdn);

        return ret;
}

struct pci_ops pnv_pci_ops = {
        .read  = pnv_pci_read_config,
        .write = pnv_pci_write_config,
};
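
/*
 * Return a pointer to the TCE for @idx.  With it_indirect_levels > 0
 * the table is a tree: each level holds TCEs pointing at the next
 * level's page, so we peel off 'shift' index bits per level until we
 * reach the leaf page of real TCEs.
 */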
static __be64 *pnv_tce(struct iommu_table *tbl, long idx)
{
        __be64 *tmp = ((__be64 *)tbl->it_base);
        int level = tbl->it_indirect_levels;
        const long shift = ilog2(tbl->it_level_size);
        unsigned long mask = (tbl->it_level_size - 1) << (level * shift);

        while (level) {
                int n = (idx & mask) >> (level * shift);
                unsigned long tce = be64_to_cpu(tmp[n]);

                tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE));
                idx &= ~mask;
                mask >>= shift;
                --level;
        }

        return tmp + idx;
}
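
/*
 * Fill @npages TCEs starting at @index with the real page numbers
 * behind @uaddr, OR'ed with the permission bits derived from the DMA
 * direction.
 */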
int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
                  unsigned long uaddr, enum dma_data_direction direction,
                  struct dma_attrs *attrs)
{
        u64 proto_tce = iommu_direction_to_tce_perm(direction);
        u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
        long i;

        for (i = 0; i < npages; i++) {
                unsigned long newtce = proto_tce |
                        ((rpn + i) << tbl->it_page_shift);
                unsigned long idx = index - tbl->it_offset + i;

                *(pnv_tce(tbl, idx)) = cpu_to_be64(newtce);
        }

        return 0;
}

#ifdef CONFIG_IOMMU_API
int pnv_tce_xchg(struct iommu_table *tbl, long index,
                 unsigned long *hpa, enum dma_data_direction *direction)
{
        u64 proto_tce = iommu_direction_to_tce_perm(*direction);
        unsigned long newtce = *hpa | proto_tce, oldtce;
        unsigned long idx = index - tbl->it_offset;

        BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));

        /* Convert the old entry to CPU endian before extracting HPA/perms */
        oldtce = be64_to_cpu(xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce)));
        *hpa = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
        *direction = iommu_tce_direction(oldtce);

        return 0;
}
#endif

void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
{
        long i;

        for (i = 0; i < npages; i++) {
                unsigned long idx = index - tbl->it_offset + i;

                *(pnv_tce(tbl, idx)) = cpu_to_be64(0);
        }
}

unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
{
        return *(pnv_tce(tbl, index - tbl->it_offset));
}
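
/*
 * iommu_table allocation and group wiring.  A table may be shared by
 * several table groups; each attachment is tracked by an RCU-listed
 * iommu_table_group_link.
 */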
struct iommu_table *pnv_pci_table_alloc(int nid)
{
        struct iommu_table *tbl;

        tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid);
        if (!tbl)
                return NULL;

        INIT_LIST_HEAD_RCU(&tbl->it_group_list);

        return tbl;
}

long pnv_pci_link_table_and_group(int node, int num,
                struct iommu_table *tbl,
                struct iommu_table_group *table_group)
{
        struct iommu_table_group_link *tgl = NULL;

        if (WARN_ON(!tbl || !table_group))
                return -EINVAL;

        tgl = kzalloc_node(sizeof(struct iommu_table_group_link), GFP_KERNEL,
                           node);
        if (!tgl)
                return -ENOMEM;

        tgl->table_group = table_group;
        list_add_rcu(&tgl->next, &tbl->it_group_list);

        table_group->tables[num] = tbl;

        return 0;
}

static void pnv_iommu_table_group_link_free(struct rcu_head *head)
{
        struct iommu_table_group_link *tgl = container_of(head,
                        struct iommu_table_group_link, rcu);

        kfree(tgl);
}

void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
                struct iommu_table_group *table_group)
{
        long i;
        bool found;
        struct iommu_table_group_link *tgl;

        if (!tbl || !table_group)
                return;

        /* Remove link to a group from table's list of attached groups */
        found = false;
        list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
                if (tgl->table_group == table_group) {
                        list_del_rcu(&tgl->next);
                        call_rcu(&tgl->rcu, pnv_iommu_table_group_link_free);
                        found = true;
                        break;
                }
        }
        if (WARN_ON(!found))
                return;

        /* Clean a pointer to iommu_table in iommu_table_group::tables[] */
        found = false;
        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                if (table_group->tables[i] == tbl) {
                        table_group->tables[i] = NULL;
                        found = true;
                        break;
                }
        }
        WARN_ON(!found);
}
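
/*
 * Fill in the generic iommu_table fields from the TCE memory handed to
 * us: it_offset is in IOMMU pages, it_size in 8-byte TCE entries.
 */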
void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
                               void *tce_mem, u64 tce_size,
                               u64 dma_offset, unsigned page_shift)
{
        tbl->it_blocksize = 16;
        tbl->it_base = (unsigned long)tce_mem;
        tbl->it_page_shift = page_shift;
        tbl->it_offset = dma_offset >> tbl->it_page_shift;
        tbl->it_index = 0;
        tbl->it_size = tce_size >> 3;
        tbl->it_busno = 0;
        tbl->it_type = TCE_PCI;
}
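
/*
 * Per-device DMA setup.  For VFs, first resolve the owning PE by RID
 * so the pdn carries a valid PE number before the PHB hook runs.
 */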
void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
{
        struct pci_controller *hose = pci_bus_to_host(pdev->bus);
        struct pnv_phb *phb = hose->private_data;
#ifdef CONFIG_PCI_IOV
        struct pnv_ioda_pe *pe;
        struct pci_dn *pdn;

        /* Fix the VF pdn PE number */
        if (pdev->is_virtfn) {
                pdn = pci_get_pdn(pdev);
                WARN_ON(pdn->pe_number != IODA_INVALID_PE);
                list_for_each_entry(pe, &phb->ioda.pe_list, list) {
                        if (pe->rid == ((pdev->bus->number << 8) |
                            (pdev->devfn & 0xff))) {
                                pdn->pe_number = pe->pe_number;
                                pe->pdev = pdev;
                                break;
                        }
                }
        }
#endif /* CONFIG_PCI_IOV */

        if (phb && phb->dma_dev_setup)
                phb->dma_dev_setup(phb, pdev);
}

u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev)
{
        struct pci_controller *hose = pci_bus_to_host(pdev->bus);
        struct pnv_phb *phb = hose->private_data;

        if (phb && phb->dma_get_required_mask)
                return phb->dma_get_required_mask(phb, pdev);

        return __dma_get_required_mask(&pdev->dev);
}

void pnv_pci_shutdown(void)
{
        struct pci_controller *hose;

        list_for_each_entry(hose, &hose_list, list_node)
                if (hose->controller_ops.shutdown)
                        hose->controller_ops.shutdown(hose);
}

/* Fixup wrong class code in p7ioc and p8 root complex */
static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
{
        dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);
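
/*
 * Platform entry point: probe the IODA/p5ioc2 IO hubs and PHB3s found
 * in the device tree, then wire up the IOMMU DMA ops.
 */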
void __init pnv_pci_init(void)
{
        struct device_node *np;
        bool found_ioda = false;

        pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);

        /* If we don't have OPAL, e.g. in a simulator, just skip PCI probe */
        if (!firmware_has_feature(FW_FEATURE_OPAL))
                return;

        /*
         * Look for IODA IO-Hubs.  We don't support mixing IODA
         * and p5ioc2 due to the need to change some global
         * probing flags.
         */
        for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
                pnv_pci_init_ioda_hub(np);
                found_ioda = true;
        }

        /* Look for p5ioc2 IO-Hubs */
        if (!found_ioda)
                for_each_compatible_node(np, NULL, "ibm,p5ioc2")
                        pnv_pci_init_p5ioc2_hub(np);

        /* Look for ioda2 built-in PHB3s */
        for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
                pnv_pci_init_ioda2_phb(np);

        /* Setup the linkage between OF nodes and PHBs */
        pci_devs_phb_init();

        /* Configure IOMMU DMA hooks */
        set_pci_dma_ops(&dma_iommu_ops);
}

machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init);