pci.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright IBM Corp. 2012
  4. *
  5. * Author(s):
  6. * Jan Glauber <jang@linux.vnet.ibm.com>
  7. *
  8. * The System z PCI code is a rewrite from a prototype by
  9. * the following people (Kudoz!):
  10. * Alexander Schmidt
  11. * Christoph Raisch
  12. * Hannes Hering
  13. * Hoang-Nam Nguyen
  14. * Jan-Bernd Themann
  15. * Stefan Roscher
  16. * Thomas Klein
  17. */
  18. #define KMSG_COMPONENT "zpci"
  19. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  20. #include <linux/kernel.h>
  21. #include <linux/slab.h>
  22. #include <linux/err.h>
  23. #include <linux/export.h>
  24. #include <linux/delay.h>
  25. #include <linux/irq.h>
  26. #include <linux/kernel_stat.h>
  27. #include <linux/seq_file.h>
  28. #include <linux/pci.h>
  29. #include <linux/msi.h>
  30. #include <asm/isc.h>
  31. #include <asm/airq.h>
  32. #include <asm/facility.h>
  33. #include <asm/pci_insn.h>
  34. #include <asm/pci_clp.h>
  35. #include <asm/pci_dma.h>
  36. #define DEBUG /* enable pr_debug */
  37. #define SIC_IRQ_MODE_ALL 0
  38. #define SIC_IRQ_MODE_SINGLE 1
  39. #define ZPCI_NR_DMA_SPACES 1
  40. #define ZPCI_NR_DEVICES CONFIG_PCI_NR_FUNCTIONS
/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);	/* protects zpci_list */

/* irq_chip backing the per-MSI Linux irqs; mask/unmask via generic MSI helpers */
static struct irq_chip zpci_irq_chip = {
	.name = "zPCI",
	.irq_unmask = pci_msi_unmask_irq,
	.irq_mask = pci_msi_mask_irq,
};

/* bitmap of PCI domain numbers in use, protected by zpci_domain_lock */
static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
static DEFINE_SPINLOCK(zpci_domain_lock);

/* adapter summary indicator bit vector (one bit per device) */
static struct airq_iv *zpci_aisb_iv;
/* shortcut from summary bit index to that device's interrupt vector */
static struct airq_iv *zpci_aibv[ZPCI_NR_DEVICES];

/*
 * Number of iomap table slots.  NOTE(review): sizing heuristic —
 * ZPCI_NR_DEVICES * PCI_BAR_COUNT / 2 entries, capped at
 * ZPCI_IOMAP_MAX_ENTRIES.
 */
#define ZPCI_IOMAP_ENTRIES						\
	min(((unsigned long) ZPCI_NR_DEVICES * PCI_BAR_COUNT / 2),	\
	ZPCI_IOMAP_MAX_ENTRIES)
static DEFINE_SPINLOCK(zpci_iomap_lock);	/* protects the two below */
static unsigned long *zpci_iomap_bitmap;	/* which iomap slots are taken */
struct zpci_iomap_entry *zpci_iomap_start;	/* the iomap table itself */
EXPORT_SYMBOL_GPL(zpci_iomap_start);

/* slab cache providing suitably aligned function measurement blocks */
static struct kmem_cache *zdev_fmb_cache;
  61. struct zpci_dev *get_zdev_by_fid(u32 fid)
  62. {
  63. struct zpci_dev *tmp, *zdev = NULL;
  64. spin_lock(&zpci_list_lock);
  65. list_for_each_entry(tmp, &zpci_list, entry) {
  66. if (tmp->fid == fid) {
  67. zdev = tmp;
  68. break;
  69. }
  70. }
  71. spin_unlock(&zpci_list_lock);
  72. return zdev;
  73. }
/*
 * Remove functions that firmware has moved from STANDBY to RESERVED.
 * Candidates are moved to a private list under zpci_list_lock first, so
 * the actual teardown (zpci_remove_device) can run without holding the
 * lock.
 */
void zpci_remove_reserved_devices(void)
{
	struct zpci_dev *tmp, *zdev;
	enum zpci_state state;
	LIST_HEAD(remove);

	spin_lock(&zpci_list_lock);
	list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
		/* clp_get_state() returning 0 means the query succeeded */
		if (zdev->state == ZPCI_FN_STATE_STANDBY &&
		    !clp_get_state(zdev->fid, &state) &&
		    state == ZPCI_FN_STATE_RESERVED)
			list_move_tail(&zdev->entry, &remove);
	}
	spin_unlock(&zpci_list_lock);

	list_for_each_entry_safe(zdev, tmp, &remove, entry)
		zpci_remove_device(zdev);
}
  90. static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus)
  91. {
  92. return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL;
  93. }
  94. int pci_domain_nr(struct pci_bus *bus)
  95. {
  96. return ((struct zpci_dev *) bus->sysdata)->domain;
  97. }
  98. EXPORT_SYMBOL_GPL(pci_domain_nr);
/* The /proc/bus/pci domain equals the PCI domain number. */
int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);
/* Modify PCI: Register adapter interruptions */
static int zpci_set_airq(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
	struct zpci_fib fib = {0};
	u8 status;

	fib.isc = PCI_ISC;
	fib.sum = 1;		/* enable summary notifications */
	fib.noi = airq_iv_end(zdev->aibv);	/* number of interrupts */
	fib.aibv = (unsigned long) zdev->aibv->vector;
	fib.aibvo = 0;		/* each zdev has its own interrupt vector */
	/* summary bit address: byte containing bit zdev->aisb, plus offset */
	fib.aisb = (unsigned long) zpci_aisb_iv->vector + (zdev->aisb/64)*8;
	fib.aisbo = zdev->aisb & 63;

	return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
}
/* Modify PCI: Unregister adapter interruptions */
static int zpci_clear_airq(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_DEREG_INT);
	struct zpci_fib fib = {0};
	u8 cc, status;

	cc = zpci_mod_fc(req, &fib, &status);
	/*
	 * cc 3: function already gone; cc 1/status 24: interrupts were
	 * already deregistered.  Both are treated as success.
	 */
	if (cc == 3 || (cc == 1 && status == 24))
		/* Function already gone or IRQs already deregistered. */
		cc = 0;

	return cc ? -EIO : 0;
}
/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
	struct zpci_fib fib = {0};
	u8 status;

	/* the IOTA must be aligned so the low 14 bits are free for flags */
	WARN_ON_ONCE(iota & 0x3fff);
	fib.pba = base;		/* PCI base address */
	fib.pal = limit;	/* PCI address limit */
	fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;

	return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
}
/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc, status;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;

	return cc ? -EIO : 0;
}
/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	/* refuse if already enabled, or if firmware wants a larger FMB */
	if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
		return -EINVAL;

	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
	if (!zdev->fmb)
		return -ENOMEM;
	/* the FMB must be 16-byte aligned (cache alignment guarantees it) */
	WARN_ON((u64) zdev->fmb & 0xf);

	/* reset software counters */
	atomic64_set(&zdev->allocated_pages, 0);
	atomic64_set(&zdev->mapped_pages, 0);
	atomic64_set(&zdev->unmapped_pages, 0);

	fib.fmb_addr = virt_to_phys(zdev->fmb);
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		/* firmware rejected the FMB: undo the allocation */
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}
/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (!zdev->fmb)
		return -EINVAL;

	/* Function measurement is disabled if fmb address is zero */
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;

	/* only free the FMB once firmware no longer writes to it */
	if (!cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}
  197. static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
  198. {
  199. u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
  200. u64 data;
  201. int rc;
  202. rc = zpci_load(&data, req, offset);
  203. if (!rc) {
  204. data = le64_to_cpu((__force __le64) data);
  205. data >>= (8 - len) * 8;
  206. *val = (u32) data;
  207. } else
  208. *val = 0xffffffff;
  209. return rc;
  210. }
  211. static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
  212. {
  213. u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
  214. u64 data = val;
  215. int rc;
  216. data <<= (8 - len) * 8;
  217. data = (__force u64) cpu_to_le64(data);
  218. rc = zpci_store(data, req, offset);
  219. return rc;
  220. }
/* No additional alignment constraints on zPCI resources. */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}
/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
	zpci_memcpy_toio(to, from, count);
}
/* Create a virtual mapping cookie for a PCI BAR */
void __iomem *pci_iomap_range(struct pci_dev *pdev,
			      int bar,
			      unsigned long offset,
			      unsigned long max)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	int idx;

	if (!pci_resource_len(pdev, bar))
		return NULL;

	/* each BAR was assigned an iomap table slot during bus setup */
	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	/* Detect overrun */
	WARN_ON(!++zpci_iomap_start[idx].count);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	/* the returned cookie encodes the slot index, not a real address */
	return (void __iomem *) ZPCI_ADDR(idx) + offset;
}
EXPORT_SYMBOL(pci_iomap_range);
/* Map a whole BAR starting at offset 0. */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap);
/* Drop one reference on the iomap slot encoded in @addr; clear it at zero. */
void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx = ZPCI_IDX(addr);

	spin_lock(&zpci_iomap_lock);
	/* Detect underrun */
	WARN_ON(!zpci_iomap_start[idx].count);
	if (!--zpci_iomap_start[idx].count) {
		zpci_iomap_start[idx].fh = 0;
		zpci_iomap_start[idx].bar = 0;
	}
	spin_unlock(&zpci_iomap_lock);
}
EXPORT_SYMBOL(pci_iounmap);
  270. static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
  271. int size, u32 *val)
  272. {
  273. struct zpci_dev *zdev = get_zdev_by_bus(bus);
  274. int ret;
  275. if (!zdev || devfn != ZPCI_DEVFN)
  276. ret = -ENODEV;
  277. else
  278. ret = zpci_cfg_load(zdev, where, val, size);
  279. return ret;
  280. }
  281. static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
  282. int size, u32 val)
  283. {
  284. struct zpci_dev *zdev = get_zdev_by_bus(bus);
  285. int ret;
  286. if (!zdev || devfn != ZPCI_DEVFN)
  287. ret = -ENODEV;
  288. else
  289. ret = zpci_cfg_store(zdev, where, val, size);
  290. return ret;
  291. }
/* Config accessors used for every zPCI root bus. */
static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};
/*
 * Adapter interrupt handler shared by all zPCI devices.
 *
 * Scans the summary bit vector for devices with pending interrupts and,
 * for each, scans that device's own interrupt vector, dispatching the
 * Linux irq stored alongside each bit.  After the first full pass the
 * interrupt facility is re-enabled and the summary vector is scanned one
 * more time to close the window for bits set in between.
 */
static void zpci_irq_handler(struct airq_struct *airq)
{
	unsigned long si, ai;
	struct airq_iv *aibv;
	int irqs_on = 0;

	inc_irq_stat(IRQIO_PCI);
	for (si = 0;;) {
		/* Scan adapter summary indicator bit vector */
		si = airq_iv_scan(zpci_aisb_iv, si, airq_iv_end(zpci_aisb_iv));
		if (si == -1UL) {
			if (irqs_on++)
				/* End of second scan with interrupts on. */
				break;
			/* First scan complete, reenable interrupts. */
			if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC))
				break;
			si = 0;
			continue;
		}

		/* Scan the adapter interrupt vector for this device. */
		aibv = zpci_aibv[si];
		for (ai = 0;;) {
			ai = airq_iv_scan(aibv, ai, airq_iv_end(aibv));
			if (ai == -1UL)
				break;
			inc_irq_stat(IRQIO_MSI);
			/* lock the bit while its handler runs */
			airq_iv_lock(aibv, ai);
			generic_handle_irq(airq_iv_get_data(aibv, ai));
			airq_iv_unlock(aibv, ai);
		}
	}
}
/*
 * Arch hook: allocate and wire up MSI/MSI-X interrupts for @pdev.
 *
 * Allocates a summary indicator bit and a per-device interrupt vector,
 * creates a Linux irq per MSI descriptor and registers the whole setup
 * with firmware via zpci_set_airq().
 *
 * Returns 0 on full success, a positive count if fewer than @nvec
 * vectors could be provided (MSI core contract), or a negative errno.
 * NOTE(review): on error the MSI core is expected to invoke
 * arch_teardown_msi_irqs() to release partially set up state — confirm
 * against the caller before restructuring these early returns.
 */
int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	unsigned int hwirq, msi_vecs;
	unsigned long aisb;
	struct msi_desc *msi;
	struct msi_msg msg;
	int rc, irq;

	zdev->aisb = -1UL;
	/* multi-MSI is not supported; ask the core to retry with one vector */
	if (type == PCI_CAP_ID_MSI && nvec > 1)
		return 1;
	msi_vecs = min_t(unsigned int, nvec, zdev->max_msi);

	/* Allocate adapter summary indicator bit */
	aisb = airq_iv_alloc_bit(zpci_aisb_iv);
	if (aisb == -1UL)
		return -EIO;
	zdev->aisb = aisb;

	/* Create adapter interrupt vector */
	zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK);
	if (!zdev->aibv)
		return -ENOMEM;

	/* Wire up shortcut pointer */
	zpci_aibv[aisb] = zdev->aibv;

	/* Request MSI interrupts */
	hwirq = 0;
	for_each_pci_msi_entry(msi, pdev) {
		rc = -EIO;
		irq = irq_alloc_desc(0);	/* Alloc irq on node 0 */
		if (irq < 0)
			return -ENOMEM;
		rc = irq_set_msi_desc(irq, msi);
		if (rc)
			return rc;
		irq_set_chip_and_handler(irq, &zpci_irq_chip,
					 handle_simple_irq);
		/* the MSI data word is the index into the device's vector */
		msg.data = hwirq;
		msg.address_lo = zdev->msi_addr & 0xffffffff;
		msg.address_hi = zdev->msi_addr >> 32;
		pci_write_msi_msg(irq, &msg);
		airq_iv_set_data(zdev->aibv, hwirq, irq);
		hwirq++;
	}

	/* Enable adapter interrupts */
	rc = zpci_set_airq(zdev);
	if (rc)
		return rc;

	return (msi_vecs == nvec) ? 0 : msi_vecs;
}
/*
 * Arch hook: release all MSI/MSI-X interrupts of @pdev.
 *
 * Disables adapter interrupts in firmware first, then masks and frees
 * each Linux irq, and finally releases the summary bit and the
 * per-device interrupt vector.  Safe to call on partially set up state:
 * aisb == -1UL and aibv == NULL are skipped.
 */
void arch_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	struct msi_desc *msi;
	int rc;

	/* Disable adapter interrupts */
	rc = zpci_clear_airq(zdev);
	if (rc)
		return;

	/* Release MSI interrupts */
	for_each_pci_msi_entry(msi, pdev) {
		if (!msi->irq)
			continue;
		/* mask at the device before tearing the irq down */
		if (msi->msi_attrib.is_msix)
			__pci_msix_desc_mask_irq(msi, 1);
		else
			__pci_msi_desc_mask_irq(msi, 1, 1);
		irq_set_msi_desc(msi->irq, NULL);
		irq_free_desc(msi->irq);
		msi->msg.address_lo = 0;
		msi->msg.address_hi = 0;
		msi->msg.data = 0;
		msi->irq = 0;
	}

	if (zdev->aisb != -1UL) {
		zpci_aibv[zdev->aisb] = NULL;
		airq_iv_free_bit(zpci_aisb_iv, zdev->aisb);
		zdev->aisb = -1UL;
	}
	if (zdev->aibv) {
		airq_iv_release(zdev->aibv);
		zdev->aibv = NULL;
	}
}
/*
 * Point each non-empty BAR resource at its iomap cookie so that
 * subsequent resource claims and ioremaps resolve through the zPCI
 * iomap table.
 */
static void zpci_map_resources(struct pci_dev *pdev)
{
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pdev->resource[i].start =
			(resource_size_t __force) pci_iomap(pdev, i, 0);
		pdev->resource[i].end = pdev->resource[i].start + len - 1;
	}
}
/* Undo zpci_map_resources(): drop the iomap reference of each mapped BAR. */
static void zpci_unmap_resources(struct pci_dev *pdev)
{
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pci_iounmap(pdev, (void __iomem __force *)
			    pdev->resource[i].start);
	}
}
/* zPCI's adapter interrupt registration for the PCI ISC. */
static struct airq_struct zpci_airq = {
	.handler = zpci_irq_handler,
	.isc = PCI_ISC,
};

/*
 * Register the adapter interrupt handler and create the summary bit
 * vector, then enable interrupt delivery for the PCI ISC.
 */
static int __init zpci_irq_init(void)
{
	int rc;

	rc = register_adapter_interrupt(&zpci_airq);
	if (rc)
		goto out;
	/* Set summary to 1 to be called every time for the ISC. */
	*zpci_airq.lsi_ptr = 1;

	rc = -ENOMEM;
	zpci_aisb_iv = airq_iv_create(ZPCI_NR_DEVICES, AIRQ_IV_ALLOC);
	if (!zpci_aisb_iv)
		goto out_airq;

	zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
	return 0;

out_airq:
	unregister_adapter_interrupt(&zpci_airq);
out:
	return rc;
}
/* Tear down zpci_irq_init() in reverse order. */
static void zpci_irq_exit(void)
{
	airq_iv_release(zpci_aisb_iv);
	unregister_adapter_interrupt(&zpci_airq);
}
  463. static int zpci_alloc_iomap(struct zpci_dev *zdev)
  464. {
  465. unsigned long entry;
  466. spin_lock(&zpci_iomap_lock);
  467. entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
  468. if (entry == ZPCI_IOMAP_ENTRIES) {
  469. spin_unlock(&zpci_iomap_lock);
  470. return -ENOSPC;
  471. }
  472. set_bit(entry, zpci_iomap_bitmap);
  473. spin_unlock(&zpci_iomap_lock);
  474. return entry;
  475. }
/* Release iomap table slot @entry and wipe its contents. */
static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
	spin_lock(&zpci_iomap_lock);
	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
	clear_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
}
  483. static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
  484. unsigned long size, unsigned long flags)
  485. {
  486. struct resource *r;
  487. r = kzalloc(sizeof(*r), GFP_KERNEL);
  488. if (!r)
  489. return NULL;
  490. r->start = start;
  491. r->end = r->start + size - 1;
  492. r->flags = flags;
  493. r->name = zdev->res_name;
  494. if (request_resource(&iomem_resource, r)) {
  495. kfree(r);
  496. return NULL;
  497. }
  498. return r;
  499. }
/*
 * Create an iomap slot and an iomem resource for every populated BAR
 * and add them to @resources for the root-bus scan.
 *
 * Returns 0 on success or a negative errno; on failure, resources
 * created so far are left for the caller to clean up (see
 * zpci_cleanup_bus_resources()).
 */
static int zpci_setup_bus_resources(struct zpci_dev *zdev,
				    struct list_head *resources)
{
	unsigned long addr, size, flags;
	struct resource *res;
	int i, entry;

	snprintf(zdev->res_name, sizeof(zdev->res_name),
		 "PCI Bus %04x:%02x", zdev->domain, ZPCI_BUS_NR);

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		/* NOTE(review): BAR bit 3 = prefetchable, bit 2 = 64-bit —
		 * matches the PCI BAR encoding; confirm against clp data. */
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		/* the resource lives at the iomap cookie address */
		addr = ZPCI_ADDR(entry);
		size = 1UL << zdev->bars[i].size;
		res = __alloc_res(zdev, addr, size, flags);
		if (!res) {
			zpci_free_iomap(zdev, entry);
			return -ENOMEM;
		}
		zdev->bars[i].res = res;
		pci_add_resource(resources, res);
	}

	return 0;
}
/* Release the iomap slots and iomem resources of all populated BARs. */
static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
{
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		/* skip BARs that were never fully set up */
		if (!zdev->bars[i].size || !zdev->bars[i].res)
			continue;

		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
		release_resource(zdev->bars[i].res);
		kfree(zdev->bars[i].res);
	}
}
/*
 * Arch hook run for each new PCI device: attach sysfs attribute groups
 * and the s390 DMA ops, map the BARs, and claim the resulting resources.
 */
int pcibios_add_device(struct pci_dev *pdev)
{
	struct resource *res;
	int i;

	pdev->dev.groups = zpci_attr_groups;
	pdev->dev.dma_ops = &s390_pci_dma_ops;
	zpci_map_resources(pdev);

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		res = &pdev->resource[i];
		if (res->parent || !res->flags)
			continue;
		pci_claim_resource(pdev, i);
	}

	return 0;
}
/* Arch hook on device release: drop the BAR iomap references. */
void pcibios_release_device(struct pci_dev *pdev)
{
	zpci_unmap_resources(pdev);
}
/*
 * Arch hook on pci_enable_device(): start debugfs/measurement for the
 * function, then enable its resources.  fmb enable failures are
 * deliberately ignored — measurement is best effort.
 */
int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_debug_init_device(zdev, dev_name(&pdev->dev));
	zpci_fmb_enable_device(zdev);

	return pci_enable_resources(pdev, mask);
}
/* Arch hook on pci_disable_device(): stop measurement and debugfs. */
void pcibios_disable_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_fmb_disable_device(zdev);
	zpci_debug_exit_device(zdev);
}
#ifdef CONFIG_HIBERNATE_CALLBACKS
/*
 * Resume path: re-enable the function in firmware, remap the BARs and
 * re-register the DMA translation table.  Only acts on functions that
 * were ONLINE before suspend.
 */
static int zpci_restore(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct zpci_dev *zdev = to_zpci(pdev);
	int ret = 0;

	if (zdev->state != ZPCI_FN_STATE_ONLINE)
		goto out;

	ret = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
	if (ret)
		goto out;

	zpci_map_resources(pdev);
	zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
			   (u64) zdev->dma_table);

out:
	return ret;
}

/*
 * Suspend path: unregister DMA translation, unmap the BARs and disable
 * the function in firmware.  No-op for functions that are not ONLINE.
 */
static int zpci_freeze(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct zpci_dev *zdev = to_zpci(pdev);

	if (zdev->state != ZPCI_FN_STATE_ONLINE)
		return 0;

	zpci_unregister_ioat(zdev, 0);
	zpci_unmap_resources(pdev);
	return clp_disable_fh(zdev);
}

/* thaw/restore and freeze/poweroff deliberately share the same handlers */
struct dev_pm_ops pcibios_pm_ops = {
	.thaw_noirq = zpci_restore,
	.freeze_noirq = zpci_freeze,
	.restore_noirq = zpci_restore,
	.poweroff_noirq = zpci_freeze,
};
#endif /* CONFIG_HIBERNATE_CALLBACKS */
/*
 * Assign a PCI domain number to @zdev.
 *
 * With unique UIDs the UID itself becomes the domain: values below
 * ZPCI_NR_DEVICES are tracked in the zpci_domain bitmap (-EEXIST on
 * collision); larger values are accepted untracked — zpci_free_domain()
 * skips those symmetrically.  Without unique UIDs the first free bitmap
 * slot is used (-ENOSPC when exhausted).
 */
static int zpci_alloc_domain(struct zpci_dev *zdev)
{
	if (zpci_unique_uid) {
		zdev->domain = (u16) zdev->uid;
		if (zdev->domain >= ZPCI_NR_DEVICES)
			return 0;

		spin_lock(&zpci_domain_lock);
		if (test_bit(zdev->domain, zpci_domain)) {
			spin_unlock(&zpci_domain_lock);
			return -EEXIST;
		}
		set_bit(zdev->domain, zpci_domain);
		spin_unlock(&zpci_domain_lock);
		return 0;
	}

	spin_lock(&zpci_domain_lock);
	zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
	if (zdev->domain == ZPCI_NR_DEVICES) {
		spin_unlock(&zpci_domain_lock);
		return -ENOSPC;
	}
	set_bit(zdev->domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return 0;
}
/*
 * Return a domain number to the bitmap.  Domains >= ZPCI_NR_DEVICES were
 * never tracked (see zpci_alloc_domain()) and are skipped.
 */
static void zpci_free_domain(struct zpci_dev *zdev)
{
	if (zdev->domain >= ZPCI_NR_DEVICES)
		return;

	spin_lock(&zpci_domain_lock);
	clear_bit(zdev->domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
}
/*
 * Arch hook when a root bus goes away: tear down the hotplug slot,
 * BAR resources, IOMMU and domain, unlink the zpci device from the
 * global list and free it.
 */
void pcibios_remove_bus(struct pci_bus *bus)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);

	zpci_exit_slot(zdev);
	zpci_cleanup_bus_resources(zdev);
	zpci_destroy_iommu(zdev);
	zpci_free_domain(zdev);

	spin_lock(&zpci_list_lock);
	list_del(&zdev->entry);
	spin_unlock(&zpci_list_lock);

	zpci_dbg(3, "rem fid:%x\n", zdev->fid);
	kfree(zdev);
}
/*
 * Create the root bus for @zdev, scan and add its devices.
 * On failure the BAR resources created here are cleaned up before
 * returning a negative errno.
 */
static int zpci_scan_bus(struct zpci_dev *zdev)
{
	LIST_HEAD(resources);
	int ret;

	ret = zpci_setup_bus_resources(zdev, &resources);
	if (ret)
		goto error;

	zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
				      zdev, &resources);
	if (!zdev->bus) {
		ret = -EIO;
		goto error;
	}
	zdev->bus->max_bus_speed = zdev->max_bus_speed;
	pci_bus_add_devices(zdev->bus);
	return 0;

error:
	zpci_cleanup_bus_resources(zdev);
	pci_free_resource_list(&resources);
	return ret;
}
/*
 * Enable the function in firmware and set up its DMA translation,
 * transitioning it to the ONLINE state.  On DMA init failure the
 * firmware enable is rolled back.
 */
int zpci_enable_device(struct zpci_dev *zdev)
{
	int rc;

	rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
	if (rc)
		goto out;

	rc = zpci_dma_init_device(zdev);
	if (rc)
		goto out_dma;

	zdev->state = ZPCI_FN_STATE_ONLINE;
	return 0;

out_dma:
	clp_disable_fh(zdev);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);
/* Tear down DMA translation and disable the function in firmware. */
int zpci_disable_device(struct zpci_dev *zdev)
{
	zpci_dma_exit_device(zdev);
	return clp_disable_fh(zdev);
}
EXPORT_SYMBOL_GPL(zpci_disable_device);
/*
 * Bring a newly detected function into the system: allocate a domain,
 * set up the IOMMU, enable it (if CONFIGURED), scan its root bus, link
 * it into zpci_list and create its hotplug slot.
 *
 * Errors unwind in strict reverse order via the goto ladder.
 */
int zpci_create_device(struct zpci_dev *zdev)
{
	int rc;

	rc = zpci_alloc_domain(zdev);
	if (rc)
		goto out;

	rc = zpci_init_iommu(zdev);
	if (rc)
		goto out_free;

	mutex_init(&zdev->lock);
	if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
		rc = zpci_enable_device(zdev);
		if (rc)
			goto out_destroy_iommu;
	}
	rc = zpci_scan_bus(zdev);
	if (rc)
		goto out_disable;

	spin_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	spin_unlock(&zpci_list_lock);

	zpci_init_slot(zdev);

	return 0;

out_disable:
	/* only disable what zpci_enable_device() actually enabled */
	if (zdev->state == ZPCI_FN_STATE_ONLINE)
		zpci_disable_device(zdev);
out_destroy_iommu:
	zpci_destroy_iommu(zdev);
out_free:
	zpci_free_domain(zdev);
out:
	return rc;
}
/*
 * Remove the function's root bus; the rest of the teardown happens in
 * pcibios_remove_bus().  No-op if the bus was never created.
 */
void zpci_remove_device(struct zpci_dev *zdev)
{
	if (!zdev->bus)
		return;

	pci_stop_root_bus(zdev->bus);
	pci_remove_root_bus(zdev->bus);
}
/* Forward an adapter error report to the service element via SCLP. */
int zpci_report_error(struct pci_dev *pdev,
		      struct zpci_report_error_header *report)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	return sclp_pci_report(report, zdev->fh, zdev->fid);
}
EXPORT_SYMBOL(zpci_report_error);
/*
 * Allocate the global zPCI memory: the FMB slab cache, the iomap table
 * and its allocation bitmap.  Unwinds on failure and returns -ENOMEM.
 */
static int zpci_mem_init(void)
{
	/* the FMB alignment must be a power of two >= its size */
	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
		     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));

	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
					   __alignof__(struct zpci_fmb), 0, NULL);
	if (!zdev_fmb_cache)
		goto error_fmb;

	zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
				   sizeof(*zpci_iomap_start), GFP_KERNEL);
	if (!zpci_iomap_start)
		goto error_iomap;

	zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
				    sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
	if (!zpci_iomap_bitmap)
		goto error_iomap_bitmap;

	return 0;
error_iomap_bitmap:
	kfree(zpci_iomap_start);
error_iomap:
	kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
	return -ENOMEM;
}
/* Free the global zPCI memory allocated by zpci_mem_init(). */
static void zpci_mem_exit(void)
{
	kfree(zpci_iomap_bitmap);
	kfree(zpci_iomap_start);
	kmem_cache_destroy(zdev_fmb_cache);
}
static unsigned int s390_pci_probe = 1;		/* cleared by "pci=off" */
static unsigned int s390_pci_initialized;	/* set once pci_base_init() succeeds */

/* Handle the "pci=" kernel parameter; only "off" is recognized. */
char * __init pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		s390_pci_probe = 0;
		return NULL;	/* consumed */
	}
	return str;		/* not ours, pass it on */
}

/* True once zPCI base init completed successfully. */
bool zpci_is_enabled(void)
{
	return s390_pci_initialized;
}
/*
 * zPCI base initialization: bails out quietly when disabled on the
 * command line or when the required facilities (69, 71) are missing;
 * otherwise brings up debugfs, memory, interrupts and DMA, then scans
 * for functions via CLP.  Unwinds in reverse order on failure.
 */
static int __init pci_base_init(void)
{
	int rc;

	if (!s390_pci_probe)
		return 0;

	if (!test_facility(69) || !test_facility(71))
		return 0;

	rc = zpci_debug_init();
	if (rc)
		goto out;

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = zpci_dma_init();
	if (rc)
		goto out_dma;

	rc = clp_scan_pci_devices();
	if (rc)
		goto out_find;

	s390_pci_initialized = 1;
	return 0;

out_find:
	zpci_dma_exit();
out_dma:
	zpci_irq_exit();
out_irq:
	zpci_mem_exit();
out_mem:
	zpci_debug_exit();
out:
	return rc;
}
subsys_initcall_sync(pci_base_init);
  827. void zpci_rescan(void)
  828. {
  829. if (zpci_is_enabled())
  830. clp_rescan_pci_devices_simple();
  831. }