memremap.c

/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/device.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
        return ioremap(offset, size);
}
#endif

/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: either MEMREMAP_WB or MEMREMAP_WT
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable.
 *
 * MEMREMAP_WB - matches the default mapping for "System RAM" on
 * the architecture. This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM,
 * memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility. Attempts to
 * map "System RAM" with this mapping type will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
        int is_ram = region_intersects(offset, size, "System RAM");
        void *addr = NULL;

        if (is_ram == REGION_MIXED) {
                WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
                                &offset, (unsigned long) size);
                return NULL;
        }

        /* Try all mapping types requested until one returns non-NULL */
        if (flags & MEMREMAP_WB) {
                flags &= ~MEMREMAP_WB;
                /*
                 * MEMREMAP_WB is special in that it can be satisfied
                 * from the direct map. Some archs depend on the
                 * capability of memremap() to autodetect cases where
                 * the requested range is potentially in "System RAM"
                 */
                if (is_ram == REGION_INTERSECTS)
                        addr = __va(offset);
                else
                        addr = ioremap_cache(offset, size);
        }

        /*
         * If we don't have a mapping yet and more request flags are
         * pending then we will be attempting to establish a new virtual
         * address mapping. Enforce that this mapping is not aliasing
         * "System RAM".
         */
        if (!addr && is_ram == REGION_INTERSECTS && flags) {
                WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
                                &offset, (unsigned long) size);
                return NULL;
        }

        if (!addr && (flags & MEMREMAP_WT)) {
                flags &= ~MEMREMAP_WT;
                addr = ioremap_wt(offset, size);
        }

        return addr;
}
EXPORT_SYMBOL(memremap);

void memunmap(void *addr)
{
        if (is_vmalloc_addr(addr))
                iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);
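
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * caller that knows a physical range has no I/O side effects can map it
 * as ordinary cacheable memory and release it with memunmap(). The
 * fw_table_phys and fw_table_len values below are hypothetical inputs
 * supplied by the caller.
 *
 *	void *table = memremap(fw_table_phys, fw_table_len, MEMREMAP_WB);
 *
 *	if (!table)
 *		return -ENOMEM;
 *	// ... access table through a plain pointer, no __iomem needed ...
 *	memunmap(table);
 */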

static void devm_memremap_release(struct device *dev, void *res)
{
        memunmap(res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
        return *(void **)res == match_data;
}

void *devm_memremap(struct device *dev, resource_size_t offset,
                size_t size, unsigned long flags)
{
        void **ptr, *addr;

        ptr = devres_alloc(devm_memremap_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return NULL;

        addr = memremap(offset, size, flags);
        if (addr) {
                *ptr = addr;
                devres_add(dev, ptr);
        } else
                devres_free(ptr);

        return addr;
}
EXPORT_SYMBOL(devm_memremap);

void devm_memunmap(struct device *dev, void *addr)
{
        WARN_ON(devres_destroy(dev, devm_memremap_release, devm_memremap_match,
                                addr));
        memunmap(addr);
}
EXPORT_SYMBOL(devm_memunmap);
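
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * devm_ variants tie the mapping's lifetime to the device, so driver
 * error and remove paths need no explicit memunmap(). The pdev and res
 * values below are hypothetical probe-time inputs.
 *
 *	void *base = devm_memremap(&pdev->dev, res->start,
 *			resource_size(res), MEMREMAP_WB);
 *
 *	if (!base)
 *		return -ENOMEM;
 */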

#ifdef CONFIG_ZONE_DEVICE
struct page_map {
        struct resource res;
};

static void devm_memremap_pages_release(struct device *dev, void *res)
{
        struct page_map *page_map = res;

        /* pages are dead and unused, undo the arch mapping */
        arch_remove_memory(page_map->res.start, resource_size(&page_map->res));
}

void *devm_memremap_pages(struct device *dev, struct resource *res)
{
        int is_ram = region_intersects(res->start, resource_size(res),
                        "System RAM");
        struct page_map *page_map;
        int error, nid;

        if (is_ram == REGION_MIXED) {
                WARN_ONCE(1, "%s attempted on mixed region %pr\n",
                                __func__, res);
                return ERR_PTR(-ENXIO);
        }

        if (is_ram == REGION_INTERSECTS)
                return __va(res->start);

        page_map = devres_alloc(devm_memremap_pages_release,
                        sizeof(*page_map), GFP_KERNEL);
        if (!page_map)
                return ERR_PTR(-ENOMEM);

        memcpy(&page_map->res, res, sizeof(*res));

        nid = dev_to_node(dev);
        if (nid < 0)
                nid = 0;

        error = arch_add_memory(nid, res->start, resource_size(res), true);
        if (error) {
                devres_free(page_map);
                return ERR_PTR(error);
        }

        devres_add(dev, page_map);
        return __va(res->start);
}
EXPORT_SYMBOL(devm_memremap_pages);
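
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * pmem-style driver passes the struct resource describing its device
 * memory so that the range gets backed by struct page entries. Note the
 * ERR_PTR()-based error convention, unlike memremap() above, which
 * returns NULL on failure. The dev and res values are hypothetical.
 *
 *	void *addr = devm_memremap_pages(dev, res);
 *
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 */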
#endif /* CONFIG_ZONE_DEVICE */