/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/iommu.h>
#include <linux/sched/mm.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/firmware.h>
#include <asm/eeh_event.h>
#include <asm/eeh.h>

#include "powernv.h"
#include "pci.h"

static DEFINE_MUTEX(p2p_mutex);
static DEFINE_MUTEX(tunnel_mutex);
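
/*
 * Resolve the OPAL slot id for @np: read the bdfn from its "reg"
 * property, then walk up the device tree to the enclosing
 * "ibm,ioda2-phb" node and combine its "ibm,opal-phbid" with the
 * bdfn via PCI_SLOT_ID().
 */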
int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id)
{
	struct device_node *parent = np;
	u32 bdfn;
	u64 phbid;
	int ret;

	ret = of_property_read_u32(np, "reg", &bdfn);
	if (ret)
		return -ENXIO;

	bdfn = ((bdfn & 0x00ffff00) >> 8);
	while ((parent = of_get_parent(parent))) {
		if (!PCI_DN(parent)) {
			of_node_put(parent);
			break;
		}

		if (!of_device_is_compatible(parent, "ibm,ioda2-phb")) {
			of_node_put(parent);
			continue;
		}

		ret = of_property_read_u64(parent, "ibm,opal-phbid", &phbid);
		if (ret) {
			of_node_put(parent);
			return -ENXIO;
		}

		*id = PCI_SLOT_ID(phbid, bdfn);
		return 0;
	}

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_slot_id);
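
/*
 * Thin wrappers around OPAL calls. pnv_pci_get_device_tree() passes
 * OPAL's non-negative return value straight through to the caller;
 * the presence- and power-state helpers normalize success to 0.
 */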
int pnv_pci_get_device_tree(uint32_t phandle, void *buf, uint64_t len)
{
	int64_t rc;

	if (!opal_check_token(OPAL_GET_DEVICE_TREE))
		return -ENXIO;

	rc = opal_get_device_tree(phandle, (uint64_t)buf, len);
	if (rc < OPAL_SUCCESS)
		return -EIO;

	return rc;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_device_tree);

int pnv_pci_get_presence_state(uint64_t id, uint8_t *state)
{
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_GET_PRESENCE_STATE))
		return -ENXIO;

	rc = opal_pci_get_presence_state(id, (uint64_t)state);
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_presence_state);

int pnv_pci_get_power_state(uint64_t id, uint8_t *state)
{
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_GET_POWER_STATE))
		return -ENXIO;

	rc = opal_pci_get_power_state(id, (uint64_t)state);
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_power_state);
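
/*
 * Set the slot power state via the asynchronous OPAL API. Returns 0 on
 * immediate completion, 1 if an asynchronous completion message was
 * received and copied to @msg, or a negative errno on failure.
 */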
int pnv_pci_set_power_state(uint64_t id, uint8_t state, struct opal_msg *msg)
{
	struct opal_msg m;
	int token, ret;
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_SET_POWER_STATE))
		return -ENXIO;

	token = opal_async_get_token_interruptible();
	if (unlikely(token < 0))
		return token;

	rc = opal_pci_set_power_state(token, id, (uint64_t)&state);
	if (rc == OPAL_SUCCESS) {
		ret = 0;
		goto exit;
	} else if (rc != OPAL_ASYNC_COMPLETION) {
		ret = -EIO;
		goto exit;
	}

	ret = opal_async_wait_response(token, &m);
	if (ret < 0)
		goto exit;

	if (msg) {
		ret = 1;
		memcpy(msg, &m, sizeof(m));
	}

exit:
	opal_async_release_token(token);
	return ret;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_power_state);

#ifdef CONFIG_PCI_MSI
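/*
 * Allocate and map one MSI per descriptor: take a hwirq from the PHB's
 * MSI bitmap, create a Linux irq mapping for it, then let the PHB's
 * msi_setup() hook compose the MSI message that is written back to
 * the device.
 */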
int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	struct msi_msg msg;
	int hwirq;
	unsigned int virq;
	int rc;

	if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
		return -ENODEV;

	if (pdev->no_64bit_msi && !phb->msi32_support)
		return -ENODEV;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
			pr_warn("%s: Supports only 64-bit MSIs\n",
				pci_name(pdev));
			return -ENXIO;
		}

		hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1);
		if (hwirq < 0) {
			pr_warn("%s: Failed to find a free MSI\n",
				pci_name(pdev));
			return -ENOSPC;
		}

		virq = irq_create_mapping(NULL, phb->msi_base + hwirq);
		if (!virq) {
			pr_warn("%s: Failed to map MSI to linux irq\n",
				pci_name(pdev));
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return -ENOMEM;
		}

		rc = phb->msi_setup(phb, pdev, phb->msi_base + hwirq,
				    virq, entry->msi_attrib.is_64, &msg);
		if (rc) {
			pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
			irq_dispose_mapping(virq);
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return rc;
		}

		irq_set_msi_desc(virq, entry);
		pci_write_msi_msg(virq, &msg);
	}

	return 0;
}
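
/*
 * Undo pnv_setup_msi_irqs(): detach each MSI descriptor, dispose of
 * the irq mapping and return the hwirq to the PHB's MSI bitmap.
 */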
void pnv_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	irq_hw_number_t hwirq;

	if (WARN_ON(!phb))
		return;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->irq)
			continue;

		hwirq = virq_to_hw(entry->irq);
		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
		msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
	}
}
#endif /* CONFIG_PCI_MSI */

/* Nicely print the contents of the PE State Tables (PEST). */
static void pnv_pci_dump_pest(__be64 pestA[], __be64 pestB[], int pest_size)
{
	u64 prevA = ULONG_MAX, prevB = ULONG_MAX;
	bool dup = false;
	int i;

	for (i = 0; i < pest_size; i++) {
		/* be64_to_cpu() yields CPU-endian values, so use u64 locals */
		u64 peA = be64_to_cpu(pestA[i]);
		u64 peB = be64_to_cpu(pestB[i]);

		if (peA != prevA || peB != prevB) {
			if (dup) {
				pr_info("PE[..%03x] A/B: as above\n", i - 1);
				dup = false;
			}
			prevA = peA;
			prevB = peB;
			if (peA & PNV_IODA_STOPPED_STATE ||
			    peB & PNV_IODA_STOPPED_STATE)
				pr_info("PE[%03x] A/B: %016llx %016llx\n",
					i, peA, peB);
		} else if (!dup && (peA & PNV_IODA_STOPPED_STATE ||
				    peB & PNV_IODA_STOPPED_STATE)) {
			dup = true;
		}
	}
}

static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
					 struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoP7IOCPhbErrorData *data;

	data = (struct OpalIoP7IOCPhbErrorData *)common;
	pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));

	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts: %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->p7iocPlssr || data->p7iocCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->p7iocPlssr),
			be64_to_cpu(data->p7iocCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_P7IOC_NUM_PEST_REGS);
}

static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
					struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoPhb3ErrorData *data;

	data = (struct OpalIoPhb3ErrorData *)common;
	pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));
	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts: %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->nFir)
		pr_info("nFir: %016llx %016llx %016llx\n",
			be64_to_cpu(data->nFir),
			be64_to_cpu(data->nFirMask),
			be64_to_cpu(data->nFirWOF));
	if (data->phbPlssr || data->phbCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->phbPlssr),
			be64_to_cpu(data->phbCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB3_NUM_PEST_REGS);
}

static void pnv_pci_dump_phb4_diag_data(struct pci_controller *hose,
					struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoPhb4ErrorData *data;

	data = (struct OpalIoPhb4ErrorData *)common;
	/* Print the PHB number in hex, matching the P7IOC/PHB3 dumpers */
	pr_info("PHB4 PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));
	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId)
		pr_info("sourceId: %08x\n", be32_to_cpu(data->sourceId));
	if (data->nFir)
		pr_info("nFir: %016llx %016llx %016llx\n",
			be64_to_cpu(data->nFir),
			be64_to_cpu(data->nFirMask),
			be64_to_cpu(data->nFirWOF));
	if (data->phbPlssr || data->phbCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->phbPlssr),
			be64_to_cpu(data->phbCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->phbTxeErrorStatus)
		pr_info("PhbTxeErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbTxeErrorStatus),
			be64_to_cpu(data->phbTxeFirstErrorStatus),
			be64_to_cpu(data->phbTxeErrorLog0),
			be64_to_cpu(data->phbTxeErrorLog1));
	if (data->phbRxeArbErrorStatus)
		pr_info("RxeArbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRxeArbErrorStatus),
			be64_to_cpu(data->phbRxeArbFirstErrorStatus),
			be64_to_cpu(data->phbRxeArbErrorLog0),
			be64_to_cpu(data->phbRxeArbErrorLog1));
	if (data->phbRxeMrgErrorStatus)
		pr_info("RxeMrgErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRxeMrgErrorStatus),
			be64_to_cpu(data->phbRxeMrgFirstErrorStatus),
			be64_to_cpu(data->phbRxeMrgErrorLog0),
			be64_to_cpu(data->phbRxeMrgErrorLog1));
	if (data->phbRxeTceErrorStatus)
		pr_info("RxeTceErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRxeTceErrorStatus),
			be64_to_cpu(data->phbRxeTceFirstErrorStatus),
			be64_to_cpu(data->phbRxeTceErrorLog0),
			be64_to_cpu(data->phbRxeTceErrorLog1));
	if (data->phbPblErrorStatus)
		pr_info("PblErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbPblErrorStatus),
			be64_to_cpu(data->phbPblFirstErrorStatus),
			be64_to_cpu(data->phbPblErrorLog0),
			be64_to_cpu(data->phbPblErrorLog1));
	if (data->phbPcieDlpErrorStatus)
		pr_info("PcieDlp: %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbPcieDlpErrorLog1),
			be64_to_cpu(data->phbPcieDlpErrorLog2),
			be64_to_cpu(data->phbPcieDlpErrorStatus));
	if (data->phbRegbErrorStatus)
		pr_info("RegbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRegbErrorStatus),
			be64_to_cpu(data->phbRegbFirstErrorStatus),
			be64_to_cpu(data->phbRegbErrorLog0),
			be64_to_cpu(data->phbRegbErrorLog1));

	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB4_NUM_PEST_REGS);
}

void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
				unsigned char *log_buff)
{
	struct OpalIoPhbErrorCommon *common;

	if (!hose || !log_buff)
		return;

	common = (struct OpalIoPhbErrorCommon *)log_buff;
	switch (be32_to_cpu(common->ioType)) {
	case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
		pnv_pci_dump_p7ioc_diag_data(hose, common);
		break;
	case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
		pnv_pci_dump_phb3_diag_data(hose, common);
		break;
	case OPAL_PHB_ERROR_DATA_TYPE_PHB4:
		pnv_pci_dump_phb4_diag_data(hose, common);
		break;
	default:
		pr_warn("%s: Unrecognized ioType %d\n",
			__func__, be32_to_cpu(common->ioType));
	}
}
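
/*
 * Fetch the PHB diag-data buffer and clear the frozen state of @pe_no,
 * using the PHB's unfreeze_pe() hook when the PHB supports compound
 * PEs. The diag buffer is only dumped if the unfreeze fails.
 */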
static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
{
	unsigned long flags, rc;
	int has_diag, ret = 0;

	spin_lock_irqsave(&phb->lock, flags);

	/* Fetch PHB diag-data */
	rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data,
					 phb->diag_data_size);
	has_diag = (rc == OPAL_SUCCESS);

	/* If the PHB supports compound PEs, let it handle the unfreeze */
	if (phb->unfreeze_pe) {
		ret = phb->unfreeze_pe(phb,
				       pe_no,
				       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
	} else {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
					       pe_no,
					       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		if (rc) {
			pr_warn("%s: Failure %ld clearing frozen PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				pe_no);
			ret = -EIO;
		}
	}

	/*
	 * For now, only display the diag buffer when we fail to clear
	 * the EEH status. We'll do more sensible things later when we
	 * have proper EEH support. We need to make sure we don't pollute
	 * ourselves with the normal errors generated when probing empty
	 * slots.
	 */
	if (has_diag && ret)
		pnv_pci_dump_phb_diag_data(phb->hose, phb->diag_data);

	spin_unlock_irqrestore(&phb->lock, flags);
}
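
/*
 * Check the frozen state of the PE behind @pdn after a config access
 * made while EEH is not taking care of the device, and clear any
 * MMIO/DMA freeze that is found.
 */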
static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u8 fstate;
	__be16 pcierr;
	unsigned int pe_no;
	s64 rc;

	/*
	 * Get the PE#. During the PCI probe stage, the PE# might not
	 * have been assigned yet, so map all ER errors to the
	 * reserved PE.
	 */
	pe_no = pdn->pe_number;
	if (pe_no == IODA_INVALID_PE) {
		pe_no = phb->ioda.reserved_pe_idx;
	}

	/*
	 * Fetch the frozen state. If the PHB supports compound PEs,
	 * we need its hook to handle that case.
	 */
	if (phb->get_pe_state) {
		fstate = phb->get_pe_state(phb, pe_no);
	} else {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						pe_no,
						&fstate,
						&pcierr,
						NULL);
		if (rc) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
				__func__, rc, phb->hose->global_number, pe_no);
			return;
		}
	}

	pr_devel(" -> EEH check, bdfn=%04x PE#%x fstate=%x\n",
		 (pdn->busno << 8) | (pdn->devfn), pe_no, fstate);

	/* Clear the frozen state if applicable */
	if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_DMA_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_MMIO_DMA_FREEZE) {
		/*
		 * If the PHB supports compound PEs, freeze it for
		 * consistency.
		 */
		if (phb->freeze_pe)
			phb->freeze_pe(phb, pe_no);

		pnv_pci_handle_eeh_config(phb, pe_no);
	}
}
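
/*
 * Low-level config space accessors: route the access through the
 * per-size OPAL config calls, returning all-ones data on a failed
 * read so that EEH error detection keeps working.
 */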
int pnv_pci_cfg_read(struct pci_dn *pdn,
		     int where, int size, u32 *val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;
	s64 rc;

	switch (size) {
	case 1: {
		u8 v8;

		rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8);
		*val = (rc == OPAL_SUCCESS) ? v8 : 0xff;
		break;
	}
	case 2: {
		__be16 v16;

		rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
						    &v16);
		*val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff;
		break;
	}
	case 4: {
		__be32 v32;

		rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
		*val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff;
		break;
	}
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		 __func__, pdn->busno, pdn->devfn, where, size, *val);
	return PCIBIOS_SUCCESSFUL;
}

int pnv_pci_cfg_write(struct pci_dn *pdn,
		      int where, int size, u32 val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;

	pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		 __func__, pdn->busno, pdn->devfn, where, size, val);

	switch (size) {
	case 1:
		opal_pci_config_write_byte(phb->opal_id, bdfn, where, val);
		break;
	case 2:
		opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val);
		break;
	case 4:
		opal_pci_config_write_word(phb->opal_id, bdfn, where, val);
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return PCIBIOS_SUCCESSFUL;
}
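
/*
 * Decide whether a config access should go through: it is blocked
 * while the device's PE has config accesses blocked (e.g. during a
 * PE reset) or after the device has been removed by EEH.
 */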
#ifdef CONFIG_EEH
static bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
	struct eeh_dev *edev = NULL;
	struct pnv_phb *phb = pdn->phb->private_data;

	/* EEH not enabled ? */
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		return true;

	/* PE reset or device removed ? */
	edev = pdn->edev;
	if (edev) {
		if (edev->pe &&
		    (edev->pe->state & EEH_PE_CFG_BLOCKED))
			return false;

		if (edev->mode & EEH_DEV_REMOVED)
			return false;
	}

	return true;
}
#else
static inline bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
	return true;
}
#endif /* CONFIG_EEH */
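
/*
 * pci_ops for PowerNV buses. On top of the raw OPAL accessors these
 * check for EEH failures: reads that return the all-ones error value
 * are fed to eeh_dev_check_failure(), and when EEH is not handling
 * the device we fall back to pnv_pci_config_check_eeh().
 */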
static int pnv_pci_read_config(struct pci_bus *bus,
			       unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	int ret;

	*val = 0xFFFFFFFF;
	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_read(pdn, where, size, val);
	phb = pdn->phb->private_data;
	if (phb->flags & PNV_PHB_FLAG_EEH && pdn->edev) {
		if (*val == EEH_IO_ERROR_VALUE(size) &&
		    eeh_dev_check_failure(pdn->edev))
			return PCIBIOS_DEVICE_NOT_FOUND;
	} else {
		pnv_pci_config_check_eeh(pdn);
	}

	return ret;
}

static int pnv_pci_write_config(struct pci_bus *bus,
				unsigned int devfn,
				int where, int size, u32 val)
{
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	int ret;

	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_write(pdn, where, size, val);
	phb = pdn->phb->private_data;
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		pnv_pci_config_check_eeh(pdn);

	return ret;
}

struct pci_ops pnv_pci_ops = {
	.read  = pnv_pci_read_config,
	.write = pnv_pci_write_config,
};
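
/*
 * Allocate an iommu_table on NUMA node @nid and initialize its group
 * list and reference count.
 */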
struct iommu_table *pnv_pci_table_alloc(int nid)
{
	struct iommu_table *tbl;

	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid);
	if (!tbl)
		return NULL;

	INIT_LIST_HEAD_RCU(&tbl->it_group_list);
	kref_init(&tbl->it_kref);

	return tbl;
}
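
/*
 * Per-device DMA setup. For VFs this also fixes up the pdn's PE
 * number, which is not known until the parent PF has configured
 * SR-IOV, by matching the VF's RID against the PHB's PE list.
 */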
void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
#ifdef CONFIG_PCI_IOV
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;

	/* Fix the VF pdn PE number */
	if (pdev->is_virtfn) {
		pdn = pci_get_pdn(pdev);
		WARN_ON(pdn->pe_number != IODA_INVALID_PE);
		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
			if (pe->rid == ((pdev->bus->number << 8) |
			    (pdev->devfn & 0xff))) {
				pdn->pe_number = pe->pe_number;
				pe->pdev = pdev;
				break;
			}
		}
	}
#endif /* CONFIG_PCI_IOV */

	if (phb && phb->dma_dev_setup)
		phb->dma_dev_setup(phb, pdev);
}
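
/*
 * Match @bus against the bus-type PEs on its PHB and update the
 * owning PE's pbus pointer to the current pci_bus.
 */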
void pnv_pci_dma_bus_setup(struct pci_bus *bus)
{
	struct pci_controller *hose = bus->sysdata;
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe;

	list_for_each_entry(pe, &phb->ioda.pe_list, list) {
		if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
			continue;

		if (!pe->pbus)
			continue;

		if (bus->number == ((pe->rid >> 8) & 0xFF)) {
			pe->pbus = bus;
			break;
		}
	}
}
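
/*
 * Enable or disable a peer-to-peer path between @initiator and
 * @target as described by @desc. See the reference-counting notes in
 * the function body: the initiator count lives on the PE, the target
 * count on the PHB.
 */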
int pnv_pci_set_p2p(struct pci_dev *initiator, struct pci_dev *target, u64 desc)
{
	struct pci_controller *hose;
	struct pnv_phb *phb_init, *phb_target;
	struct pnv_ioda_pe *pe_init;
	int rc;

	if (!opal_check_token(OPAL_PCI_SET_P2P))
		return -ENXIO;

	hose = pci_bus_to_host(initiator->bus);
	phb_init = hose->private_data;

	hose = pci_bus_to_host(target->bus);
	phb_target = hose->private_data;

	pe_init = pnv_ioda_get_pe(initiator);
	if (!pe_init)
		return -ENODEV;

	/*
	 * Configuring the initiator's PHB requires adjusting its TVE#1
	 * setting. Since the same device can be an initiator several
	 * times for different target devices, we need a reference count
	 * to know when we can restore the default bypass setting on its
	 * TVE#1 when disabling. OPAL does not track PE states, so we
	 * add a reference count on the PE in Linux.
	 *
	 * For the target, the configuration is per PHB, so we keep a
	 * target reference count on the PHB.
	 */
	mutex_lock(&p2p_mutex);

	if (desc & OPAL_PCI_P2P_ENABLE) {
		/* always go to opal to validate the configuration */
		rc = opal_pci_set_p2p(phb_init->opal_id, phb_target->opal_id,
				      desc, pe_init->pe_number);
		if (rc != OPAL_SUCCESS) {
			rc = -EIO;
			goto out;
		}

		pe_init->p2p_initiator_count++;
		phb_target->p2p_target_count++;
	} else {
		if (!pe_init->p2p_initiator_count ||
		    !phb_target->p2p_target_count) {
			rc = -EINVAL;
			goto out;
		}

		if (--pe_init->p2p_initiator_count == 0)
			pnv_pci_ioda2_set_bypass(pe_init, true);

		if (--phb_target->p2p_target_count == 0) {
			rc = opal_pci_set_p2p(phb_init->opal_id,
					      phb_target->opal_id, desc,
					      pe_init->pe_number);
			if (rc != OPAL_SUCCESS) {
				rc = -EIO;
				goto out;
			}
		}
	}
	rc = 0;
out:
	mutex_unlock(&p2p_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_p2p);

struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);

	return of_node_get(hose->dn);
}
EXPORT_SYMBOL(pnv_pci_get_phb_node);
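
/*
 * Enable the as_notify tunnel for @dev: return the second cell of the
 * PHB's "ibm,phb-indications" property in @asnind and widen the PE's
 * real DMA window to 2^48 so that as_notify messages are accepted.
 */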
int pnv_pci_enable_tunnel(struct pci_dev *dev, u64 *asnind)
{
	struct device_node *np;
	const __be32 *prop;
	struct pnv_ioda_pe *pe;
	uint16_t window_id;
	int rc;

	if (!radix_enabled())
		return -ENXIO;

	if (!(np = pnv_pci_get_phb_node(dev)))
		return -ENXIO;

	prop = of_get_property(np, "ibm,phb-indications", NULL);
	of_node_put(np);

	if (!prop || !prop[1])
		return -ENXIO;

	*asnind = (u64)be32_to_cpu(prop[1]);
	pe = pnv_ioda_get_pe(dev);
	if (!pe)
		return -ENODEV;

	/* Increase real window size to accept as_notify messages. */
	window_id = (pe->pe_number << 1) + 1;
	rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, pe->pe_number,
					     window_id, pe->tce_bypass_base,
					     (uint64_t)1 << 48);
	return opal_error_code(rc);
}
EXPORT_SYMBOL_GPL(pnv_pci_enable_tunnel);

int pnv_pci_disable_tunnel(struct pci_dev *dev)
{
	struct pnv_ioda_pe *pe;

	pe = pnv_ioda_get_pe(dev);
	if (!pe)
		return -ENODEV;

	/* Restore default real window size. */
	pnv_pci_ioda2_set_bypass(pe, true);
	return 0;
}
EXPORT_SYMBOL_GPL(pnv_pci_disable_tunnel);
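
/*
 * Claim or release the PBCQ tunnel BAR for @dev. Only one device per
 * PHB may own it at a time: enabling fails with -EBUSY if a different
 * address is already set, and disabling requires passing the address
 * that was set.
 */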
int pnv_pci_set_tunnel_bar(struct pci_dev *dev, u64 addr, int enable)
{
	__be64 val;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	u64 tunnel_bar;
	int rc;

	if (!opal_check_token(OPAL_PCI_GET_PBCQ_TUNNEL_BAR))
		return -ENXIO;
	if (!opal_check_token(OPAL_PCI_SET_PBCQ_TUNNEL_BAR))
		return -ENXIO;

	hose = pci_bus_to_host(dev->bus);
	phb = hose->private_data;

	mutex_lock(&tunnel_mutex);
	rc = opal_pci_get_pbcq_tunnel_bar(phb->opal_id, &val);
	if (rc != OPAL_SUCCESS) {
		rc = -EIO;
		goto out;
	}
	tunnel_bar = be64_to_cpu(val);
	if (enable) {
		/*
		 * Only one device per PHB can use atomics.
		 * Our policy is first-come, first-served.
		 */
		if (tunnel_bar) {
			if (tunnel_bar != addr)
				rc = -EBUSY;
			else
				rc = 0;	/* Setting same address twice is ok */
			goto out;
		}
	} else {
		/*
		 * The device that owns atomics and wants to release
		 * them must pass the same address with enable == 0.
		 */
		if (tunnel_bar != addr) {
			rc = -EPERM;
			goto out;
		}
		addr = 0x0ULL;
	}
	rc = opal_pci_set_pbcq_tunnel_bar(phb->opal_id, addr);
	rc = opal_error_code(rc);
out:
	mutex_unlock(&tunnel_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_tunnel_bar);

#ifdef CONFIG_PPC64 /* for thread.tidr */
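/*
 * Collect the LPID/PID/TID triple identifying @task for as_notify:
 * the PID comes from the task's mm context, the TID from
 * thread.tidr, and the LPID from the LPID SPR.
 */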
int pnv_pci_get_as_notify_info(struct task_struct *task, u32 *lpid, u32 *pid,
			       u32 *tid)
{
	struct mm_struct *mm = NULL;

	if (task == NULL)
		return -EINVAL;

	mm = get_task_mm(task);
	if (mm == NULL)
		return -EINVAL;

	*pid = mm->context.id;
	mmput(mm);

	*tid = task->thread.tidr;
	*lpid = mfspr(SPRN_LPID);
	return 0;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_as_notify_info);
#endif

void pnv_pci_shutdown(void)
{
	struct pci_controller *hose;

	list_for_each_entry(hose, &hose_list, list_node)
		if (hose->controller_ops.shutdown)
			hose->controller_ops.shutdown(hose);
}

/* Fixup wrong class code in p7ioc and p8 root complex */
static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);
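
/*
 * Probe the device tree for all supported IO hub and PHB flavours and
 * initialize each one: IODA hubs, PHB3s, PHB4s (treated as IODA2),
 * NPU/NPU2 PHBs and NPU2 OpenCAPI PHBs.
 */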
void __init pnv_pci_init(void)
{
	struct device_node *np;

	pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);

	/* If we don't have OPAL, eg. in sim, just skip PCI probe */
	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return;

	/* Look for IODA IO-Hubs. */
	for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
		pnv_pci_init_ioda_hub(np);
	}

	/* Look for ioda2 built-in PHB3's */
	for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Look for ioda3 built-in PHB4's, we treat them as IODA2 */
	for_each_compatible_node(np, NULL, "ibm,ioda3-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Look for NPU PHBs */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu-phb")
		pnv_pci_init_npu_phb(np);

	/*
	 * Look for NPU2 PHBs which we treat mostly as NPU PHBs with
	 * the exception of TCE kill which requires an OPAL call.
	 */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-phb")
		pnv_pci_init_npu_phb(np);

	/* Look for NPU2 OpenCAPI PHBs */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-opencapi-phb")
		pnv_pci_init_npu2_opencapi_phb(np);

	/* Configure IOMMU DMA hooks */
	set_pci_dma_ops(&dma_iommu_ops);
}

machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init);