/* drivers/iommu/of_iommu.c */
  1. /*
  2. * OF helpers for IOMMU
  3. *
  4. * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms and conditions of the GNU General Public License,
  8. * version 2, as published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope it will be useful, but WITHOUT
  11. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  13. * more details.
  14. *
  15. * You should have received a copy of the GNU General Public License along with
  16. * this program; if not, write to the Free Software Foundation, Inc.,
  17. * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  18. */
  19. #include <linux/export.h>
  20. #include <linux/iommu.h>
  21. #include <linux/limits.h>
  22. #include <linux/of.h>
  23. #include <linux/of_iommu.h>
  24. #include <linux/of_pci.h>
  25. #include <linux/slab.h>
/*
 * Zero-filled terminating entry for the IOMMU of_device_id match table;
 * placed in the __iommu_of_table_end section so walks of __iommu_of_table
 * know where the table stops.
 */
static const struct of_device_id __iommu_of_table_sentinel
	__used __section(__iommu_of_table_end);
  28. /**
  29. * of_get_dma_window - Parse *dma-window property and returns 0 if found.
  30. *
  31. * @dn: device node
  32. * @prefix: prefix for property name if any
  33. * @index: index to start to parse
  34. * @busno: Returns busno if supported. Otherwise pass NULL
  35. * @addr: Returns address that DMA starts
  36. * @size: Returns the range that DMA can handle
  37. *
  38. * This supports different formats flexibly. "prefix" can be
  39. * configured if any. "busno" and "index" are optionally
  40. * specified. Set 0(or NULL) if not used.
  41. */
  42. int of_get_dma_window(struct device_node *dn, const char *prefix, int index,
  43. unsigned long *busno, dma_addr_t *addr, size_t *size)
  44. {
  45. const __be32 *dma_window, *end;
  46. int bytes, cur_index = 0;
  47. char propname[NAME_MAX], addrname[NAME_MAX], sizename[NAME_MAX];
  48. if (!dn || !addr || !size)
  49. return -EINVAL;
  50. if (!prefix)
  51. prefix = "";
  52. snprintf(propname, sizeof(propname), "%sdma-window", prefix);
  53. snprintf(addrname, sizeof(addrname), "%s#dma-address-cells", prefix);
  54. snprintf(sizename, sizeof(sizename), "%s#dma-size-cells", prefix);
  55. dma_window = of_get_property(dn, propname, &bytes);
  56. if (!dma_window)
  57. return -ENODEV;
  58. end = dma_window + bytes / sizeof(*dma_window);
  59. while (dma_window < end) {
  60. u32 cells;
  61. const void *prop;
  62. /* busno is one cell if supported */
  63. if (busno)
  64. *busno = be32_to_cpup(dma_window++);
  65. prop = of_get_property(dn, addrname, NULL);
  66. if (!prop)
  67. prop = of_get_property(dn, "#address-cells", NULL);
  68. cells = prop ? be32_to_cpup(prop) : of_n_addr_cells(dn);
  69. if (!cells)
  70. return -EINVAL;
  71. *addr = of_read_number(dma_window, cells);
  72. dma_window += cells;
  73. prop = of_get_property(dn, sizename, NULL);
  74. cells = prop ? be32_to_cpup(prop) : of_n_size_cells(dn);
  75. if (!cells)
  76. return -EINVAL;
  77. *size = of_read_number(dma_window, cells);
  78. dma_window += cells;
  79. if (cur_index++ == index)
  80. break;
  81. }
  82. return 0;
  83. }
  84. EXPORT_SYMBOL_GPL(of_get_dma_window);
  85. static bool of_iommu_driver_present(struct device_node *np)
  86. {
  87. /*
  88. * If the IOMMU still isn't ready by the time we reach init, assume
  89. * it never will be. We don't want to defer indefinitely, nor attempt
  90. * to dereference __iommu_of_table after it's been freed.
  91. */
  92. if (system_state >= SYSTEM_RUNNING)
  93. return false;
  94. return of_match_node(&__iommu_of_table, np);
  95. }
  96. static const struct iommu_ops
  97. *of_iommu_xlate(struct device *dev, struct of_phandle_args *iommu_spec)
  98. {
  99. const struct iommu_ops *ops;
  100. struct fwnode_handle *fwnode = &iommu_spec->np->fwnode;
  101. int err;
  102. ops = iommu_ops_from_fwnode(fwnode);
  103. if ((ops && !ops->of_xlate) ||
  104. !of_device_is_available(iommu_spec->np) ||
  105. (!ops && !of_iommu_driver_present(iommu_spec->np)))
  106. return NULL;
  107. err = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops);
  108. if (err)
  109. return ERR_PTR(err);
  110. /*
  111. * The otherwise-empty fwspec handily serves to indicate the specific
  112. * IOMMU device we're waiting for, which will be useful if we ever get
  113. * a proper probe-ordering dependency mechanism in future.
  114. */
  115. if (!ops)
  116. return ERR_PTR(-EPROBE_DEFER);
  117. err = ops->of_xlate(dev, iommu_spec);
  118. if (err)
  119. return ERR_PTR(err);
  120. return ops;
  121. }
  122. static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data)
  123. {
  124. struct of_phandle_args *iommu_spec = data;
  125. iommu_spec->args[0] = alias;
  126. return iommu_spec->np == pdev->bus->dev.of_node;
  127. }
  128. static const struct iommu_ops
  129. *of_pci_iommu_init(struct pci_dev *pdev, struct device_node *bridge_np)
  130. {
  131. const struct iommu_ops *ops;
  132. struct of_phandle_args iommu_spec;
  133. int err;
  134. /*
  135. * Start by tracing the RID alias down the PCI topology as
  136. * far as the host bridge whose OF node we have...
  137. * (we're not even attempting to handle multi-alias devices yet)
  138. */
  139. iommu_spec.args_count = 1;
  140. iommu_spec.np = bridge_np;
  141. pci_for_each_dma_alias(pdev, __get_pci_rid, &iommu_spec);
  142. /*
  143. * ...then find out what that becomes once it escapes the PCI
  144. * bus into the system beyond, and which IOMMU it ends up at.
  145. */
  146. iommu_spec.np = NULL;
  147. err = of_pci_map_rid(bridge_np, iommu_spec.args[0], "iommu-map",
  148. "iommu-map-mask", &iommu_spec.np,
  149. iommu_spec.args);
  150. if (err)
  151. return err == -ENODEV ? NULL : ERR_PTR(err);
  152. ops = of_iommu_xlate(&pdev->dev, &iommu_spec);
  153. of_node_put(iommu_spec.np);
  154. return ops;
  155. }
  156. static const struct iommu_ops
  157. *of_platform_iommu_init(struct device *dev, struct device_node *np)
  158. {
  159. struct of_phandle_args iommu_spec;
  160. const struct iommu_ops *ops = NULL;
  161. int idx = 0;
  162. /*
  163. * We don't currently walk up the tree looking for a parent IOMMU.
  164. * See the `Notes:' section of
  165. * Documentation/devicetree/bindings/iommu/iommu.txt
  166. */
  167. while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells",
  168. idx, &iommu_spec)) {
  169. ops = of_iommu_xlate(dev, &iommu_spec);
  170. of_node_put(iommu_spec.np);
  171. idx++;
  172. if (IS_ERR_OR_NULL(ops))
  173. break;
  174. }
  175. return ops;
  176. }
/*
 * of_iommu_configure - resolve the IOMMU (if any) behind a device from DT
 * @dev: device being configured
 * @master_np: device's DT node (for PCI, the host bridge node)
 *
 * Returns the device's iommu_ops on success, ERR_PTR(-EPROBE_DEFER) when
 * the IOMMU driver hasn't probed yet, or NULL when there is no usable
 * IOMMU (all other errors are swallowed and logged at debug level).
 */
const struct iommu_ops *of_iommu_configure(struct device *dev,
					   struct device_node *master_np)
{
	const struct iommu_ops *ops;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;

	if (!master_np)
		return NULL;

	if (fwspec) {
		/* A fwspec with ops means configuration already succeeded */
		if (fwspec->ops)
			return fwspec->ops;

		/* In the deferred case, start again from scratch */
		iommu_fwspec_free(dev);
	}

	/* PCI devices need RID translation through iommu-map first */
	if (dev_is_pci(dev))
		ops = of_pci_iommu_init(to_pci_dev(dev), master_np);
	else
		ops = of_platform_iommu_init(dev, master_np);

	/*
	 * If we have reason to believe the IOMMU driver missed the initial
	 * add_device callback for dev, replay it to get things in order.
	 */
	if (!IS_ERR_OR_NULL(ops) && ops->add_device &&
	    dev->bus && !dev->iommu_group) {
		int err = ops->add_device(dev);

		if (err)
			ops = ERR_PTR(err);
	}

	/* Ignore all other errors apart from EPROBE_DEFER */
	if (IS_ERR(ops) && (PTR_ERR(ops) != -EPROBE_DEFER)) {
		dev_dbg(dev, "Adding to IOMMU failed: %ld\n", PTR_ERR(ops));
		ops = NULL;
	}

	return ops;
}
  211. static int __init of_iommu_init(void)
  212. {
  213. struct device_node *np;
  214. const struct of_device_id *match, *matches = &__iommu_of_table;
  215. for_each_matching_node_and_match(np, matches, &match) {
  216. const of_iommu_init_fn init_fn = match->data;
  217. if (init_fn && init_fn(np))
  218. pr_err("Failed to initialise IOMMU %s\n",
  219. of_node_full_name(np));
  220. }
  221. return 0;
  222. }
  223. postcore_initcall_sync(of_iommu_init);