devres.c 9.4 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407
  1. #include <linux/err.h>
  2. #include <linux/pci.h>
  3. #include <linux/io.h>
  4. #include <linux/gfp.h>
  5. #include <linux/export.h>
  6. void devm_ioremap_release(struct device *dev, void *res)
  7. {
  8. iounmap(*(void __iomem **)res);
  9. }
  10. static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
  11. {
  12. return *(void **)res == match_data;
  13. }
  14. /**
  15. * devm_ioremap - Managed ioremap()
  16. * @dev: Generic device to remap IO address for
  17. * @offset: BUS offset to map
  18. * @size: Size of map
  19. *
  20. * Managed ioremap(). Map is automatically unmapped on driver detach.
  21. */
  22. void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
  23. resource_size_t size)
  24. {
  25. void __iomem **ptr, *addr;
  26. ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
  27. if (!ptr)
  28. return NULL;
  29. addr = ioremap(offset, size);
  30. if (addr) {
  31. *ptr = addr;
  32. devres_add(dev, ptr);
  33. } else
  34. devres_free(ptr);
  35. return addr;
  36. }
  37. EXPORT_SYMBOL(devm_ioremap);
  38. /**
  39. * devm_ioremap_nocache - Managed ioremap_nocache()
  40. * @dev: Generic device to remap IO address for
  41. * @offset: BUS offset to map
  42. * @size: Size of map
  43. *
  44. * Managed ioremap_nocache(). Map is automatically unmapped on driver
  45. * detach.
  46. */
  47. void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
  48. resource_size_t size)
  49. {
  50. void __iomem **ptr, *addr;
  51. ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
  52. if (!ptr)
  53. return NULL;
  54. addr = ioremap_nocache(offset, size);
  55. if (addr) {
  56. *ptr = addr;
  57. devres_add(dev, ptr);
  58. } else
  59. devres_free(ptr);
  60. return addr;
  61. }
  62. EXPORT_SYMBOL(devm_ioremap_nocache);
  63. /**
  64. * devm_iounmap - Managed iounmap()
  65. * @dev: Generic device to unmap for
  66. * @addr: Address to unmap
  67. *
  68. * Managed iounmap(). @addr must have been mapped using devm_ioremap*().
  69. */
  70. void devm_iounmap(struct device *dev, void __iomem *addr)
  71. {
  72. WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
  73. (__force void *)addr));
  74. iounmap(addr);
  75. }
  76. EXPORT_SYMBOL(devm_iounmap);
  77. /**
  78. * devm_ioremap_resource() - check, request region, and ioremap resource
  79. * @dev: generic device to handle the resource for
  80. * @res: resource to be handled
  81. *
  82. * Checks that a resource is a valid memory region, requests the memory region
  83. * and ioremaps it either as cacheable or as non-cacheable memory depending on
  84. * the resource's flags. All operations are managed and will be undone on
  85. * driver detach.
  86. *
  87. * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
  88. * on failure. Usage example:
  89. *
  90. * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  91. * base = devm_ioremap_resource(&pdev->dev, res);
  92. * if (IS_ERR(base))
  93. * return PTR_ERR(base);
  94. */
  95. void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
  96. {
  97. resource_size_t size;
  98. const char *name;
  99. void __iomem *dest_ptr;
  100. BUG_ON(!dev);
  101. if (!res || resource_type(res) != IORESOURCE_MEM) {
  102. dev_err(dev, "invalid resource\n");
  103. return IOMEM_ERR_PTR(-EINVAL);
  104. }
  105. size = resource_size(res);
  106. name = res->name ?: dev_name(dev);
  107. if (!devm_request_mem_region(dev, res->start, size, name)) {
  108. dev_err(dev, "can't request region for resource %pR\n", res);
  109. return IOMEM_ERR_PTR(-EBUSY);
  110. }
  111. if (res->flags & IORESOURCE_CACHEABLE)
  112. dest_ptr = devm_ioremap(dev, res->start, size);
  113. else
  114. dest_ptr = devm_ioremap_nocache(dev, res->start, size);
  115. if (!dest_ptr) {
  116. dev_err(dev, "ioremap failed for resource %pR\n", res);
  117. devm_release_mem_region(dev, res->start, size);
  118. dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
  119. }
  120. return dest_ptr;
  121. }
  122. EXPORT_SYMBOL(devm_ioremap_resource);
  123. #ifdef CONFIG_HAS_IOPORT_MAP
  124. /*
  125. * Generic iomap devres
  126. */
  127. static void devm_ioport_map_release(struct device *dev, void *res)
  128. {
  129. ioport_unmap(*(void __iomem **)res);
  130. }
  131. static int devm_ioport_map_match(struct device *dev, void *res,
  132. void *match_data)
  133. {
  134. return *(void **)res == match_data;
  135. }
  136. /**
  137. * devm_ioport_map - Managed ioport_map()
  138. * @dev: Generic device to map ioport for
  139. * @port: Port to map
  140. * @nr: Number of ports to map
  141. *
  142. * Managed ioport_map(). Map is automatically unmapped on driver
  143. * detach.
  144. */
  145. void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
  146. unsigned int nr)
  147. {
  148. void __iomem **ptr, *addr;
  149. ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
  150. if (!ptr)
  151. return NULL;
  152. addr = ioport_map(port, nr);
  153. if (addr) {
  154. *ptr = addr;
  155. devres_add(dev, ptr);
  156. } else
  157. devres_free(ptr);
  158. return addr;
  159. }
  160. EXPORT_SYMBOL(devm_ioport_map);
  161. /**
  162. * devm_ioport_unmap - Managed ioport_unmap()
  163. * @dev: Generic device to unmap for
  164. * @addr: Address to unmap
  165. *
  166. * Managed ioport_unmap(). @addr must have been mapped using
  167. * devm_ioport_map().
  168. */
  169. void devm_ioport_unmap(struct device *dev, void __iomem *addr)
  170. {
  171. ioport_unmap(addr);
  172. WARN_ON(devres_destroy(dev, devm_ioport_map_release,
  173. devm_ioport_map_match, (__force void *)addr));
  174. }
  175. EXPORT_SYMBOL(devm_ioport_unmap);
  176. #endif /* CONFIG_HAS_IOPORT_MAP */
  177. #ifdef CONFIG_PCI
  178. /*
  179. * PCI iomap devres
  180. */
/* Number of iomap slots: one per standard BAR (the ROM BAR is excluded). */
#define PCIM_IOMAP_MAX	PCI_ROM_RESOURCE

/*
 * Per-device iomap table kept as a single devres entry; slot i holds the
 * mapping for BAR i (NULL when that BAR is not mapped).
 */
struct pcim_iomap_devres {
	void __iomem *table[PCIM_IOMAP_MAX];
};
  185. static void pcim_iomap_release(struct device *gendev, void *res)
  186. {
  187. struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
  188. struct pcim_iomap_devres *this = res;
  189. int i;
  190. for (i = 0; i < PCIM_IOMAP_MAX; i++)
  191. if (this->table[i])
  192. pci_iounmap(dev, this->table[i]);
  193. }
  194. /**
  195. * pcim_iomap_table - access iomap allocation table
  196. * @pdev: PCI device to access iomap table for
  197. *
  198. * Access iomap allocation table for @dev. If iomap table doesn't
  199. * exist and @pdev is managed, it will be allocated. All iomaps
  200. * recorded in the iomap table are automatically unmapped on driver
  201. * detach.
  202. *
  203. * This function might sleep when the table is first allocated but can
  204. * be safely called without context and guaranteed to succed once
  205. * allocated.
  206. */
  207. void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
  208. {
  209. struct pcim_iomap_devres *dr, *new_dr;
  210. dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
  211. if (dr)
  212. return dr->table;
  213. new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
  214. if (!new_dr)
  215. return NULL;
  216. dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
  217. return dr->table;
  218. }
  219. EXPORT_SYMBOL(pcim_iomap_table);
  220. /**
  221. * pcim_iomap - Managed pcim_iomap()
  222. * @pdev: PCI device to iomap for
  223. * @bar: BAR to iomap
  224. * @maxlen: Maximum length of iomap
  225. *
  226. * Managed pci_iomap(). Map is automatically unmapped on driver
  227. * detach.
  228. */
  229. void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
  230. {
  231. void __iomem **tbl;
  232. BUG_ON(bar >= PCIM_IOMAP_MAX);
  233. tbl = (void __iomem **)pcim_iomap_table(pdev);
  234. if (!tbl || tbl[bar]) /* duplicate mappings not allowed */
  235. return NULL;
  236. tbl[bar] = pci_iomap(pdev, bar, maxlen);
  237. return tbl[bar];
  238. }
  239. EXPORT_SYMBOL(pcim_iomap);
  240. /**
  241. * pcim_iounmap - Managed pci_iounmap()
  242. * @pdev: PCI device to iounmap for
  243. * @addr: Address to unmap
  244. *
  245. * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
  246. */
  247. void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
  248. {
  249. void __iomem **tbl;
  250. int i;
  251. pci_iounmap(pdev, addr);
  252. tbl = (void __iomem **)pcim_iomap_table(pdev);
  253. BUG_ON(!tbl);
  254. for (i = 0; i < PCIM_IOMAP_MAX; i++)
  255. if (tbl[i] == addr) {
  256. tbl[i] = NULL;
  257. return;
  258. }
  259. WARN_ON(1);
  260. }
  261. EXPORT_SYMBOL(pcim_iounmap);
/**
 * pcim_iomap_regions - Request and iomap PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to request and iomap
 * @name: Name used when requesting regions
 *
 * Request and iomap regions specified by @mask.  On any failure all
 * BARs processed so far are unmapped and released before returning.
 *
 * Returns 0 on success, -EINVAL for a zero-length BAR, -ENOMEM if the
 * table allocation or mapping fails, or the pci_request_region() error.
 */
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
{
	void __iomem * const *iomap;
	int i, rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		unsigned long len;

		if (!(mask & (1 << i)))
			continue;

		/* rc is pre-set before each step so the goto reports it */
		rc = -EINVAL;
		len = pci_resource_len(pdev, i);
		if (!len)
			goto err_inval;

		rc = pci_request_region(pdev, i, name);
		if (rc)
			goto err_inval;

		rc = -ENOMEM;
		if (!pcim_iomap(pdev, i, 0))
			goto err_region;
	}
	return 0;

	/*
	 * Unwind: err_region releases the region of the BAR that failed
	 * AFTER its request succeeded; err_inval then walks back over all
	 * earlier BARs in @mask, unmapping and releasing each.
	 */
err_region:
	pci_release_region(pdev, i);
err_inval:
	while (--i >= 0) {
		if (!(mask & (1 << i)))
			continue;
		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}
	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);
  305. /**
  306. * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
  307. * @pdev: PCI device to map IO resources for
  308. * @mask: Mask of BARs to iomap
  309. * @name: Name used when requesting regions
  310. *
  311. * Request all PCI BARs and iomap regions specified by @mask.
  312. */
  313. int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
  314. const char *name)
  315. {
  316. int request_mask = ((1 << 6) - 1) & ~mask;
  317. int rc;
  318. rc = pci_request_selected_regions(pdev, request_mask, name);
  319. if (rc)
  320. return rc;
  321. rc = pcim_iomap_regions(pdev, mask, name);
  322. if (rc)
  323. pci_release_selected_regions(pdev, request_mask);
  324. return rc;
  325. }
  326. EXPORT_SYMBOL(pcim_iomap_regions_request_all);
  327. /**
  328. * pcim_iounmap_regions - Unmap and release PCI BARs
  329. * @pdev: PCI device to map IO resources for
  330. * @mask: Mask of BARs to unmap and release
  331. *
  332. * Unmap and release regions specified by @mask.
  333. */
  334. void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
  335. {
  336. void __iomem * const *iomap;
  337. int i;
  338. iomap = pcim_iomap_table(pdev);
  339. if (!iomap)
  340. return;
  341. for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
  342. if (!(mask & (1 << i)))
  343. continue;
  344. pcim_iounmap(pdev, iomap[i]);
  345. pci_release_region(pdev, i);
  346. }
  347. }
  348. EXPORT_SYMBOL(pcim_iounmap_regions);
  349. #endif /* CONFIG_PCI */