#include <linux/err.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/export.h>

void devm_ioremap_release(struct device *dev, void *res)
{
	iounmap(*(void __iomem **)res);
}

static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}
/**
 * devm_ioremap - Managed ioremap()
 * @dev: Generic device to remap IO address for
 * @offset: BUS offset to map
 * @size: Size of map
 *
 * Managed ioremap(). Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
			   unsigned long size)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioremap(offset, size);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioremap);
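
/*
 * Illustration (not part of the original file): a minimal sketch of how
 * a driver might use devm_ioremap() in its probe path.  foo_probe(),
 * FOO_PHYS_BASE, FOO_REG_SIZE and FOO_CTRL are hypothetical names.  The
 * point is that no iounmap() is needed on error paths or in remove();
 * the devres core unmaps on driver detach.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		void __iomem *base;
 *
 *		base = devm_ioremap(&pdev->dev, FOO_PHYS_BASE, FOO_REG_SIZE);
 *		if (!base)
 *			return -ENOMEM;
 *
 *		writel(1, base + FOO_CTRL);
 *		return 0;
 *	}
 *
 * devm_ioremap_nocache() below is used the same way when an uncached
 * mapping is required.
 */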

/**
 * devm_ioremap_nocache - Managed ioremap_nocache()
 * @dev: Generic device to remap IO address for
 * @offset: BUS offset to map
 * @size: Size of map
 *
 * Managed ioremap_nocache(). Map is automatically unmapped on driver
 * detach.
 */
void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
				   unsigned long size)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioremap_nocache(offset, size);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioremap_nocache);

/**
 * devm_iounmap - Managed iounmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed iounmap(). @addr must have been mapped using devm_ioremap*().
 */
void devm_iounmap(struct device *dev, void __iomem *addr)
{
	WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
			       (__force void *)addr));
	iounmap(addr);
}
EXPORT_SYMBOL(devm_iounmap);

#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err)

/**
 * devm_ioremap_resource() - check, request region, and ioremap resource
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory region
 * and ioremaps it either as cacheable or as non-cacheable memory depending on
 * the resource's flags. All operations are managed and will be undone on
 * driver detach.
 *
 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure. Usage example:
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_ioremap_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */
void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
{
	resource_size_t size;
	const char *name;
	void __iomem *dest_ptr;

	BUG_ON(!dev);

	if (!res || resource_type(res) != IORESOURCE_MEM) {
		dev_err(dev, "invalid resource\n");
		return IOMEM_ERR_PTR(-EINVAL);
	}

	size = resource_size(res);
	name = res->name ?: dev_name(dev);

	if (!devm_request_mem_region(dev, res->start, size, name)) {
		dev_err(dev, "can't request region for resource %pR\n", res);
		return IOMEM_ERR_PTR(-EBUSY);
	}

	if (res->flags & IORESOURCE_CACHEABLE)
		dest_ptr = devm_ioremap(dev, res->start, size);
	else
		dest_ptr = devm_ioremap_nocache(dev, res->start, size);

	if (!dest_ptr) {
		dev_err(dev, "ioremap failed for resource %pR\n", res);
		devm_release_mem_region(dev, res->start, size);
		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
	}

	return dest_ptr;
}
EXPORT_SYMBOL(devm_ioremap_resource);

/**
 * devm_request_and_ioremap() - Check, request region, and ioremap resource
 * @dev: Generic device to handle the resource for
 * @res: resource to be handled
 *
 * Takes all necessary steps to ioremap a mem resource. Uses managed device,
 * so everything is undone on driver detach. Checks arguments, so you can feed
 * it the result from e.g. platform_get_resource() directly. Returns the
 * remapped pointer or NULL on error. Usage example:
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_request_and_ioremap(&pdev->dev, res);
 *	if (!base)
 *		return -EADDRNOTAVAIL;
 */
void __iomem *devm_request_and_ioremap(struct device *dev,
				       struct resource *res)
{
	void __iomem *dest_ptr;

	dest_ptr = devm_ioremap_resource(dev, res);
	if (IS_ERR(dest_ptr))
		return NULL;

	return dest_ptr;
}
EXPORT_SYMBOL(devm_request_and_ioremap);

#ifdef CONFIG_HAS_IOPORT_MAP
/*
 * Generic iomap devres
 */
static void devm_ioport_map_release(struct device *dev, void *res)
{
	ioport_unmap(*(void __iomem **)res);
}

static int devm_ioport_map_match(struct device *dev, void *res,
				 void *match_data)
{
	return *(void **)res == match_data;
}

/**
 * devm_ioport_map - Managed ioport_map()
 * @dev: Generic device to map ioport for
 * @port: Port to map
 * @nr: Number of ports to map
 *
 * Managed ioport_map(). Map is automatically unmapped on driver
 * detach.
 */
void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
			      unsigned int nr)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioport_map(port, nr);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioport_map);
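
/*
 * Illustration (not part of the original file): a sketch of mapping a
 * small legacy port range with devm_ioport_map().  The port base 0x3f8
 * and length 8 are hypothetical.  The returned cookie is used with the
 * ioread/iowrite accessors and is unmapped by the devres core on detach.
 *
 *	void __iomem *regs;
 *
 *	regs = devm_ioport_map(dev, 0x3f8, 8);
 *	if (!regs)
 *		return -ENOMEM;
 *	iowrite8(0, regs);
 */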

/**
 * devm_ioport_unmap - Managed ioport_unmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed ioport_unmap(). @addr must have been mapped using
 * devm_ioport_map().
 */
void devm_ioport_unmap(struct device *dev, void __iomem *addr)
{
	ioport_unmap(addr);
	WARN_ON(devres_destroy(dev, devm_ioport_map_release,
			       devm_ioport_map_match, (__force void *)addr));
}
EXPORT_SYMBOL(devm_ioport_unmap);
#endif /* CONFIG_HAS_IOPORT_MAP */

#ifdef CONFIG_PCI
/*
 * PCI iomap devres
 */
#define PCIM_IOMAP_MAX	PCI_ROM_RESOURCE

struct pcim_iomap_devres {
	void __iomem *table[PCIM_IOMAP_MAX];
};

static void pcim_iomap_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pcim_iomap_devres *this = res;
	int i;

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (this->table[i])
			pci_iounmap(dev, this->table[i]);
}

/**
 * pcim_iomap_table - access iomap allocation table
 * @pdev: PCI device to access iomap table for
 *
 * Access iomap allocation table for @pdev. If the iomap table doesn't
 * exist and @pdev is managed, it will be allocated. All iomaps
 * recorded in the iomap table are automatically unmapped on driver
 * detach.
 *
 * This function might sleep when the table is first allocated but can
 * be safely called without context and is guaranteed to succeed once
 * allocated.
 */
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
	if (dr)
		return dr->table;

	new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
	return dr->table;
}
EXPORT_SYMBOL(pcim_iomap_table);
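
/*
 * Illustration (not part of the original file): a driver that has
 * already mapped BAR 0, e.g. through pcim_iomap() or pcim_iomap_regions()
 * below, can look the cookie up again anywhere it has the pci_dev:
 *
 *	void __iomem *base = pcim_iomap_table(pdev)[0];
 *
 * Slots for BARs that were never mapped read back as NULL.
 */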

/**
 * pcim_iomap - Managed pci_iomap()
 * @pdev: PCI device to iomap for
 * @bar: BAR to iomap
 * @maxlen: Maximum length of iomap
 *
 * Managed pci_iomap(). Map is automatically unmapped on driver
 * detach.
 */
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
	void __iomem **tbl;

	BUG_ON(bar >= PCIM_IOMAP_MAX);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	if (!tbl || tbl[bar])	/* duplicate mappings not allowed */
		return NULL;

	tbl[bar] = pci_iomap(pdev, bar, maxlen);
	return tbl[bar];
}
EXPORT_SYMBOL(pcim_iomap);
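
/*
 * Illustration (not part of the original file): a sketch of mapping a
 * single BAR; passing maxlen == 0 maps the BAR's full length.  Note that
 * pcim_iomap() only maps.  Pair it with pci_request_region(), or use
 * pcim_iomap_regions() below which does both, to keep ownership exclusive.
 *
 *	void __iomem *mmio;
 *
 *	mmio = pcim_iomap(pdev, 0, 0);
 *	if (!mmio)
 *		return -ENOMEM;
 */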

/**
 * pcim_iounmap - Managed pci_iounmap()
 * @pdev: PCI device to iounmap for
 * @addr: Address to unmap
 *
 * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
 */
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	void __iomem **tbl;
	int i;

	pci_iounmap(pdev, addr);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	BUG_ON(!tbl);

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (tbl[i] == addr) {
			tbl[i] = NULL;
			return;
		}
	WARN_ON(1);
}
EXPORT_SYMBOL(pcim_iounmap);

/**
 * pcim_iomap_regions - Request and iomap PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to request and iomap
 * @name: Name used when requesting regions
 *
 * Request and iomap regions specified by @mask.
 */
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
{
	void __iomem * const *iomap;
	int i, rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		unsigned long len;

		if (!(mask & (1 << i)))
			continue;

		rc = -EINVAL;
		len = pci_resource_len(pdev, i);
		if (!len)
			goto err_inval;

		rc = pci_request_region(pdev, i, name);
		if (rc)
			goto err_inval;

		rc = -ENOMEM;
		if (!pcim_iomap(pdev, i, 0))
			goto err_region;
	}

	return 0;

 err_region:
	pci_release_region(pdev, i);
 err_inval:
	while (--i >= 0) {
		if (!(mask & (1 << i)))
			continue;
		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}

	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);
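
/*
 * Illustration (not part of the original file): the typical managed-PCI
 * probe sequence built on pcim_iomap_regions().  DRV_NAME, hw and the
 * BAR numbers are hypothetical.  On failure the function has already
 * unmapped and released everything it touched, and the devres core
 * undoes the rest on detach, so the error paths are bare returns.
 *
 *	rc = pcim_enable_device(pdev);
 *	if (rc)
 *		return rc;
 *
 *	rc = pcim_iomap_regions(pdev, BIT(0) | BIT(2), DRV_NAME);
 *	if (rc)
 *		return rc;
 *
 *	hw->mmio = pcim_iomap_table(pdev)[0];
 */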

/**
 * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to iomap
 * @name: Name used when requesting regions
 *
 * Request all PCI BARs and iomap regions specified by @mask.
 */
int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
				   const char *name)
{
	/*
	 * pcim_iomap_regions() requests the BARs in @mask itself, so only
	 * the remaining ones of the six standard BARs are requested here.
	 */
	int request_mask = ((1 << 6) - 1) & ~mask;
	int rc;

	rc = pci_request_selected_regions(pdev, request_mask, name);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, mask, name);
	if (rc)
		pci_release_selected_regions(pdev, request_mask);
	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions_request_all);
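
/*
 * Illustration (not part of the original file): the request_all variant
 * suits drivers that want exclusive ownership of every BAR, keeping other
 * drivers off the device, while mapping only the BARs they touch, here
 * the hypothetical BAR 1:
 *
 *	rc = pcim_iomap_regions_request_all(pdev, BIT(1), DRV_NAME);
 *	if (rc)
 *		return rc;
 *	regs = pcim_iomap_table(pdev)[1];
 */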

/**
 * pcim_iounmap_regions - Unmap and release PCI BARs
 * @pdev: PCI device to unmap IO resources for
 * @mask: Mask of BARs to unmap and release
 *
 * Unmap and release regions specified by @mask.
 */
void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
{
	void __iomem * const *iomap;
	int i;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (!(mask & (1 << i)))
			continue;

		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}
}
EXPORT_SYMBOL(pcim_iounmap_regions);
#endif /* CONFIG_PCI */