of.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * PCI <-> OF mapping helpers
 *
 * Copyright 2011 IBM Corp.
 */
#define pr_fmt(fmt)	"PCI: OF: " fmt

#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include "pci.h"

void pci_set_of_node(struct pci_dev *dev)
{
	if (!dev->bus->dev.of_node)
		return;
	dev->dev.of_node = of_pci_find_child_device(dev->bus->dev.of_node,
						    dev->devfn);
}

void pci_release_of_node(struct pci_dev *dev)
{
	of_node_put(dev->dev.of_node);
	dev->dev.of_node = NULL;
}

void pci_set_bus_of_node(struct pci_bus *bus)
{
	if (bus->self == NULL)
		bus->dev.of_node = pcibios_get_phb_of_node(bus);
	else
		bus->dev.of_node = of_node_get(bus->self->dev.of_node);
}

void pci_release_bus_of_node(struct pci_bus *bus)
{
	of_node_put(bus->dev.of_node);
	bus->dev.of_node = NULL;
}

struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus)
{
	/* This should only be called for PHBs */
	if (WARN_ON(bus->self || bus->parent))
		return NULL;

	/*
	 * Look for a node pointer in either the intermediary device we
	 * create above the root bus or its own parent. Normally only
	 * the latter is populated.
	 */
	if (bus->bridge->of_node)
		return of_node_get(bus->bridge->of_node);
	if (bus->bridge->parent && bus->bridge->parent->of_node)
		return of_node_get(bus->bridge->parent->of_node);
	return NULL;
}

struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus)
{
#ifdef CONFIG_IRQ_DOMAIN
	struct irq_domain *d;

	if (!bus->dev.of_node)
		return NULL;

	/* Start looking for a phandle to an MSI controller. */
	d = of_msi_get_domain(&bus->dev, bus->dev.of_node, DOMAIN_BUS_PCI_MSI);
	if (d)
		return d;

	/*
	 * If we don't have an msi-parent property, look for a domain
	 * directly attached to the host bridge.
	 */
	d = irq_find_matching_host(bus->dev.of_node, DOMAIN_BUS_PCI_MSI);
	if (d)
		return d;

	return irq_find_host(bus->dev.of_node);
#else
	return NULL;
#endif
}

static inline int __of_pci_pci_compare(struct device_node *node,
				       unsigned int data)
{
	int devfn;

	devfn = of_pci_get_devfn(node);
	if (devfn < 0)
		return 0;

	return devfn == data;
}

struct device_node *of_pci_find_child_device(struct device_node *parent,
					     unsigned int devfn)
{
	struct device_node *node, *node2;

	for_each_child_of_node(parent, node) {
		if (__of_pci_pci_compare(node, devfn))
			return node;
		/*
		 * Some OFs create a parent node "multifunc-device" as
		 * a fake root for all functions of a multi-function
		 * device; we go down them as well.
		 */
		if (!strcmp(node->name, "multifunc-device")) {
			for_each_child_of_node(node, node2) {
				if (__of_pci_pci_compare(node2, devfn)) {
					of_node_put(node);
					return node2;
				}
			}
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(of_pci_find_child_device);
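
/*
 * Illustrative only (not part of the original file): a hypothetical device
 * tree fragment using the "multifunc-device" fake root that the lookup above
 * descends into. Node names and reg values are made up for the example:
 *
 *	multifunc-device {
 *		ethernet@0,0 { reg = <0x0000 0 0 0 0>; };
 *		serial@0,1   { reg = <0x0100 0 0 0 0>; };
 *	};
 */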

/**
 * of_pci_get_devfn() - Get device and function numbers for a device node
 * @np: device node
 *
 * Parses a standard 5-cell PCI resource and returns an 8-bit value that can
 * be passed to the PCI_SLOT() and PCI_FUNC() macros to extract the device
 * and function numbers respectively. On error a negative error code is
 * returned.
 */
int of_pci_get_devfn(struct device_node *np)
{
	u32 reg[5];
	int error;

	error = of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg));
	if (error)
		return error;

	return (reg[0] >> 8) & 0xff;
}
EXPORT_SYMBOL_GPL(of_pci_get_devfn);
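
/*
 * A minimal usage sketch (not part of the original file): decode the value
 * returned above into separate device and function numbers. The node "np"
 * is assumed to describe a single PCI function:
 *
 *	int devfn = of_pci_get_devfn(np);
 *
 *	if (devfn >= 0)
 *		pr_info("device %02x function %x\n",
 *			PCI_SLOT(devfn), PCI_FUNC(devfn));
 */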

/**
 * of_pci_parse_bus_range() - parse the bus-range property of a PCI device
 * @node: device node
 * @res: address to a struct resource to return the bus-range
 *
 * Returns 0 on success or a negative error-code on failure.
 */
int of_pci_parse_bus_range(struct device_node *node, struct resource *res)
{
	u32 bus_range[2];
	int error;

	error = of_property_read_u32_array(node, "bus-range", bus_range,
					   ARRAY_SIZE(bus_range));
	if (error)
		return error;

	res->name = node->name;
	res->start = bus_range[0];
	res->end = bus_range[1];
	res->flags = IORESOURCE_BUS;

	return 0;
}
EXPORT_SYMBOL_GPL(of_pci_parse_bus_range);
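
/*
 * A minimal usage sketch (not part of the original file), assuming a host
 * bridge node "np" carrying a standard property such as
 * "bus-range = <0x00 0xff>;":
 *
 *	struct resource bus_range;
 *
 *	if (!of_pci_parse_bus_range(np, &bus_range))
 *		pr_info("buses %02llx..%02llx\n",
 *			(unsigned long long)bus_range.start,
 *			(unsigned long long)bus_range.end);
 */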

/**
 * This function will try to obtain the host bridge domain number by
 * finding a property called "linux,pci-domain" of the given device node.
 *
 * @node: device tree node with the domain information
 *
 * Returns the associated domain number from DT in the range [0-0xffff], or
 * a negative value if the required property is not found.
 */
int of_get_pci_domain_nr(struct device_node *node)
{
	u32 domain;
	int error;

	error = of_property_read_u32(node, "linux,pci-domain", &domain);
	if (error)
		return error;

	return (u16)domain;
}
EXPORT_SYMBOL_GPL(of_get_pci_domain_nr);

/**
 * This function will try to find the limitation of link speed by finding
 * a property called "max-link-speed" of the given device node.
 *
 * @node: device tree node with the max link speed information
 *
 * Returns the associated max link speed from DT, or a negative value if the
 * required property is not found or is invalid.
 */
int of_pci_get_max_link_speed(struct device_node *node)
{
	u32 max_link_speed;

	if (of_property_read_u32(node, "max-link-speed", &max_link_speed) ||
	    max_link_speed > 4)
		return -EINVAL;

	return max_link_speed;
}
EXPORT_SYMBOL_GPL(of_pci_get_max_link_speed);

/**
 * of_pci_check_probe_only - Setup probe only mode if linux,pci-probe-only
 *                           is present and valid
 */
void of_pci_check_probe_only(void)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(of_chosen, "linux,pci-probe-only", &val);
	if (ret) {
		if (ret == -ENODATA || ret == -EOVERFLOW)
			pr_warn("linux,pci-probe-only without valid value, ignoring\n");
		return;
	}

	if (val)
		pci_add_flags(PCI_PROBE_ONLY);
	else
		pci_clear_flags(PCI_PROBE_ONLY);

	pr_info("PROBE_ONLY %sabled\n", val ? "en" : "dis");
}
EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
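
/*
 * Illustrative only (not part of the original file): a hypothetical /chosen
 * node that would cause the helper above to enable PROBE_ONLY:
 *
 *	chosen {
 *		linux,pci-probe-only = <1>;
 *	};
 */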

#if defined(CONFIG_OF_ADDRESS)
/**
 * of_pci_get_host_bridge_resources - Parse PCI host bridge resources from DT
 * @dev: device node of the host bridge having the ranges property
 * @busno: bus number associated with the bridge root bus
 * @bus_max: maximum number of buses for this bridge
 * @resources: list where the range of resources will be added after DT parsing
 * @io_base: pointer to a variable that will contain on return the physical
 * address for the start of the I/O range. Can be NULL if the caller doesn't
 * expect I/O ranges to be present in the device tree.
 *
 * It is the caller's job to free the @resources list.
 *
 * This function will parse the "ranges" property of a PCI host bridge device
 * node and set up the resource mapping based on its content. It is expected
 * that the property conforms with the Power ePAPR document.
 *
 * It returns zero if the range parsing has been successful or a standard error
 * value if it failed.
 */
int of_pci_get_host_bridge_resources(struct device_node *dev,
			unsigned char busno, unsigned char bus_max,
			struct list_head *resources, resource_size_t *io_base)
{
	struct resource_entry *window;
	struct resource *res;
	struct resource *bus_range;
	struct of_pci_range range;
	struct of_pci_range_parser parser;
	char range_type[4];
	int err;

	if (io_base)
		*io_base = (resource_size_t)OF_BAD_ADDR;

	bus_range = kzalloc(sizeof(*bus_range), GFP_KERNEL);
	if (!bus_range)
		return -ENOMEM;

	pr_info("host bridge %pOF ranges:\n", dev);

	err = of_pci_parse_bus_range(dev, bus_range);
	if (err) {
		bus_range->start = busno;
		bus_range->end = bus_max;
		bus_range->flags = IORESOURCE_BUS;
		pr_info("  No bus range found for %pOF, using %pR\n",
			dev, bus_range);
	} else {
		if (bus_range->end > bus_range->start + bus_max)
			bus_range->end = bus_range->start + bus_max;
	}
	pci_add_resource(resources, bus_range);

	/* Check for ranges property */
	err = of_pci_range_parser_init(&parser, dev);
	if (err)
		goto parse_failed;

	pr_debug("Parsing ranges property...\n");
	for_each_of_pci_range(&parser, &range) {
		/* Read next ranges element */
		if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
			snprintf(range_type, 4, " IO");
		else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM)
			snprintf(range_type, 4, "MEM");
		else
			snprintf(range_type, 4, "err");
		pr_info("  %s %#010llx..%#010llx -> %#010llx\n", range_type,
			range.cpu_addr, range.cpu_addr + range.size - 1,
			range.pci_addr);

		/*
		 * If we failed translation or got a zero-sized region
		 * then skip this range
		 */
		if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
			continue;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		if (!res) {
			err = -ENOMEM;
			goto parse_failed;
		}

		err = of_pci_range_to_resource(&range, dev, res);
		if (err) {
			kfree(res);
			continue;
		}

		if (resource_type(res) == IORESOURCE_IO) {
			if (!io_base) {
				pr_err("I/O range found for %pOF. Please provide an io_base pointer to save CPU base address\n",
					dev);
				err = -EINVAL;
				goto conversion_failed;
			}
			if (*io_base != (resource_size_t)OF_BAD_ADDR)
				pr_warn("More than one I/O resource converted for %pOF. CPU base address for old range lost!\n",
					dev);
			*io_base = range.cpu_addr;
		}

		pci_add_resource_offset(resources, res, res->start - range.pci_addr);
	}

	return 0;

conversion_failed:
	kfree(res);
parse_failed:
	resource_list_for_each_entry(window, resources)
		kfree(window->res);
	pci_free_resource_list(resources);
	return err;
}
EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources);
#endif /* CONFIG_OF_ADDRESS */
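
/*
 * Illustrative only (not part of the original file): a hypothetical host
 * bridge "ranges" property of the form parsed above, with one 32-bit
 * non-prefetchable memory window and one 64 KiB I/O window (assuming a
 * parent with a single address cell; all values are made up):
 *
 *	ranges = <0x02000000 0x0 0x40000000  0x40000000  0x0 0x10000000>,
 *		 <0x01000000 0x0 0x00000000  0x4f000000  0x0 0x00010000>;
 */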

/**
 * of_pci_map_rid - Translate a requester ID through a downstream mapping.
 * @np: root complex device node.
 * @rid: PCI requester ID to map.
 * @map_name: property name of the map to use.
 * @map_mask_name: optional property name of the mask to use.
 * @target: optional pointer to a target device node.
 * @id_out: optional pointer to receive the translated ID.
 *
 * Given a PCI requester ID, look up the appropriate implementation-defined
 * platform ID and/or the target device which receives transactions on that
 * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
 * @id_out may be NULL if only the other is required. If @target points to
 * a non-NULL device node pointer, only entries targeting that node will be
 * matched; if it points to a NULL value, it will receive the device node of
 * the first matching target phandle, with a reference held.
 *
 * Return: 0 on success or a standard error code on failure.
 */
int of_pci_map_rid(struct device_node *np, u32 rid,
		   const char *map_name, const char *map_mask_name,
		   struct device_node **target, u32 *id_out)
{
	u32 map_mask, masked_rid;
	int map_len;
	const __be32 *map = NULL;

	if (!np || !map_name || (!target && !id_out))
		return -EINVAL;

	map = of_get_property(np, map_name, &map_len);
	if (!map) {
		if (target)
			return -ENODEV;
		/* Otherwise, no map implies no translation */
		*id_out = rid;
		return 0;
	}

	if (!map_len || map_len % (4 * sizeof(*map))) {
		pr_err("%pOF: Error: Bad %s length: %d\n", np,
			map_name, map_len);
		return -EINVAL;
	}

	/* The default is to select all bits. */
	map_mask = 0xffffffff;

	/*
	 * Can be overridden by "{iommu,msi}-map-mask" property.
	 * If of_property_read_u32() fails, the default is used.
	 */
	if (map_mask_name)
		of_property_read_u32(np, map_mask_name, &map_mask);

	masked_rid = map_mask & rid;
	for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
		struct device_node *phandle_node;
		u32 rid_base = be32_to_cpup(map + 0);
		u32 phandle = be32_to_cpup(map + 1);
		u32 out_base = be32_to_cpup(map + 2);
		u32 rid_len = be32_to_cpup(map + 3);

		if (rid_base & ~map_mask) {
			pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores rid-base (0x%x)\n",
				np, map_name, map_name,
				map_mask, rid_base);
			return -EFAULT;
		}

		if (masked_rid < rid_base || masked_rid >= rid_base + rid_len)
			continue;

		phandle_node = of_find_node_by_phandle(phandle);
		if (!phandle_node)
			return -ENODEV;

		if (target) {
			if (*target)
				of_node_put(phandle_node);
			else
				*target = phandle_node;

			if (*target != phandle_node)
				continue;
		}

		if (id_out)
			*id_out = masked_rid - rid_base + out_base;

		pr_debug("%pOF: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n",
			np, map_name, map_mask, rid_base, out_base,
			rid_len, rid, masked_rid - rid_base + out_base);
		return 0;
	}

	pr_err("%pOF: Invalid %s translation - no match for rid 0x%x on %pOF\n",
		np, map_name, rid, target && *target ? *target : NULL);
	return -EFAULT;
}
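
/*
 * A minimal worked example (not part of the original file), assuming a root
 * complex node "rc_np" carrying a hypothetical entry such as:
 *
 *	msi-map = <0x0000 &its 0x10000 0x10000>;
 *
 * i.e. rid-base 0x0000, out-base 0x10000, length 0x10000. Translating the
 * requester ID of bus 2, device 3, function 1 (rid = 0x0219) then yields
 * 0x0219 - 0x0000 + 0x10000 = 0x10219:
 *
 *	u32 msi_id;
 *
 *	if (!of_pci_map_rid(rc_np, 0x0219, "msi-map", "msi-map-mask",
 *			    NULL, &msi_id))
 *		pr_info("requester 0x0219 -> MSI ID 0x%x\n", msi_id);
 */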

#if IS_ENABLED(CONFIG_OF_IRQ)
/**
 * of_irq_parse_pci - Resolve the interrupt for a PCI device
 * @pdev: the device whose interrupt is to be resolved
 * @out_irq: structure of_irq filled by this function
 *
 * This function resolves the PCI interrupt for a given PCI device. If a
 * device-node exists for a given pci_dev, it will use normal OF tree
 * walking. If not, it will implement standard swizzling and walk up the
 * PCI tree until a device-node is found, at which point it will finish
 * resolving using the OF tree walking.
 */
static int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
{
	struct device_node *dn, *ppnode;
	struct pci_dev *ppdev;
	__be32 laddr[3];
	u8 pin;
	int rc;

	/*
	 * Check if we have a device node; if yes, fall back to standard
	 * device tree parsing
	 */
	dn = pci_device_to_OF_node(pdev);
	if (dn) {
		rc = of_irq_parse_one(dn, 0, out_irq);
		if (!rc)
			return rc;
	}

	/*
	 * Ok, we don't, time to have fun. Let's start by building up an
	 * interrupt spec. We assume #interrupt-cells is 1, which is standard
	 * for PCI. If you do something different, then don't use this routine.
	 */
	rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
	if (rc != 0)
		goto err;
	/* No pin, exit with no error message. */
	if (pin == 0)
		return -ENODEV;

	/* Now we walk up the PCI tree */
	for (;;) {
		/* Get the pci_dev of our parent */
		ppdev = pdev->bus->self;

		/* Ouch, it's a host bridge... */
		if (ppdev == NULL) {
			ppnode = pci_bus_to_OF_node(pdev->bus);

			/* No node for host bridge? Give up */
			if (ppnode == NULL) {
				rc = -EINVAL;
				goto err;
			}
		} else {
			/* We found a P2P bridge, check if it has a node */
			ppnode = pci_device_to_OF_node(ppdev);
		}

		/*
		 * Ok, we have found a parent with a device-node, hand over to
		 * the OF parsing code.
		 * We build a unit address from the linux device to be used for
		 * resolution. Note that we use the linux bus number which may
		 * not match your firmware bus numbering.
		 * Fortunately, in most cases, interrupt-map-mask doesn't
		 * include the bus number as part of the matching.
		 * You should still be careful about that though if you intend
		 * to rely on this function (you ship a firmware that doesn't
		 * create device nodes for all PCI devices).
		 */
		if (ppnode)
			break;

		/*
		 * We can only get here if we hit a P2P bridge with no node;
		 * let's do standard swizzling and try again
		 */
		pin = pci_swizzle_interrupt_pin(pdev, pin);
		pdev = ppdev;
	}

	out_irq->np = ppnode;
	out_irq->args_count = 1;
	out_irq->args[0] = pin;
	laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8));
	laddr[1] = laddr[2] = cpu_to_be32(0);
	rc = of_irq_parse_raw(laddr, out_irq);
	if (rc)
		goto err;
	return 0;
err:
	if (rc == -ENOENT) {
		dev_warn(&pdev->dev,
			"%s: no interrupt-map found, INTx interrupts not available\n",
			__func__);
		pr_warn_once("%s: possibly some PCI slots don't have level triggered interrupts capability\n",
			__func__);
	} else {
		dev_err(&pdev->dev, "%s: failed with rc=%d\n", __func__, rc);
	}
	return rc;
}

/**
 * of_irq_parse_and_map_pci() - Decode a PCI IRQ from the device tree and map to a VIRQ
 * @dev: The PCI device needing an IRQ
 * @slot: PCI slot number; passed when used as map_irq callback. Unused
 * @pin: PCI IRQ pin number; passed when used as map_irq callback. Unused
 *
 * @slot and @pin are unused, but included in the function so that this
 * function can be used directly as the map_irq callback to
 * pci_assign_irq() and struct pci_host_bridge.map_irq pointer
 */
int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
{
	struct of_phandle_args oirq;
	int ret;

	ret = of_irq_parse_pci(dev, &oirq);
	if (ret)
		return 0; /* Proper return code 0 == NO_IRQ */

	return irq_create_of_mapping(&oirq);
}
EXPORT_SYMBOL_GPL(of_irq_parse_and_map_pci);
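
/*
 * A minimal usage sketch (not part of the original file): a host bridge
 * driver can hook this helper up as its map_irq callback so that INTx
 * routing is taken from the device tree interrupt-map. "bridge" is assumed
 * to be the driver's struct pci_host_bridge pointer:
 *
 *	bridge->map_irq = of_irq_parse_and_map_pci;
 *	bridge->swizzle_irq = pci_common_swizzle;
 */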

#endif /* CONFIG_OF_IRQ */

int pci_parse_request_of_pci_ranges(struct device *dev,
				    struct list_head *resources,
				    struct resource **bus_range)
{
	int err, res_valid = 0;
	struct device_node *np = dev->of_node;
	resource_size_t iobase;
	struct resource_entry *win, *tmp;

	INIT_LIST_HEAD(resources);
	err = of_pci_get_host_bridge_resources(np, 0, 0xff, resources, &iobase);
	if (err)
		return err;

	err = devm_request_pci_bus_resources(dev, resources);
	if (err)
		goto out_release_res;

	resource_list_for_each_entry_safe(win, tmp, resources) {
		struct resource *res = win->res;

		switch (resource_type(res)) {
		case IORESOURCE_IO:
			err = pci_remap_iospace(res, iobase);
			if (err) {
				dev_warn(dev, "error %d: failed to map resource %pR\n",
					 err, res);
				resource_list_destroy_entry(win);
			}
			break;
		case IORESOURCE_MEM:
			res_valid |= !(res->flags & IORESOURCE_PREFETCH);
			break;
		case IORESOURCE_BUS:
			if (bus_range)
				*bus_range = res;
			break;
		}
	}

	if (res_valid)
		return 0;

	dev_err(dev, "non-prefetchable memory resource required\n");
	err = -EINVAL;

out_release_res:
	pci_free_resource_list(resources);
	return err;
}
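
/*
 * A minimal usage sketch (not part of the original file): a hypothetical
 * host controller probe routine gathering its windows with the helper above
 * before registering the bridge:
 *
 *	static int my_pcie_probe(struct platform_device *pdev)
 *	{
 *		LIST_HEAD(resources);
 *		struct resource *bus_range;
 *		int ret;
 *
 *		ret = pci_parse_request_of_pci_ranges(&pdev->dev, &resources,
 *						      &bus_range);
 *		if (ret)
 *			return ret;
 *
 *		// ... program the controller, then hand &resources to the
 *		// generic host bridge registration path ...
 *		return 0;
 *	}
 */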