memremap.c

/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/radix-tree.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/wait_bit.h>

static DEFINE_MUTEX(pgmap_lock);
static RADIX_TREE(pgmap_radix, GFP_KERNEL);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)
static unsigned long order_at(struct resource *res, unsigned long pgoff)
{
	unsigned long phys_pgoff = PHYS_PFN(res->start) + pgoff;
	unsigned long nr_pages, mask;

	nr_pages = PHYS_PFN(resource_size(res));
	if (nr_pages == pgoff)
		return ULONG_MAX;

	/*
	 * What is the largest aligned power-of-2 range available from
	 * this resource pgoff to the end of the resource range,
	 * considering the alignment of the current pgoff? See the worked
	 * example after foreach_order_pgoff() below.
	 */
	mask = phys_pgoff | rounddown_pow_of_two(nr_pages - pgoff);
	if (!mask)
		return ULONG_MAX;

	return find_first_bit(&mask, BITS_PER_LONG);
}
#define foreach_order_pgoff(res, order, pgoff) \
	for (pgoff = 0, order = order_at((res), pgoff); order < ULONG_MAX; \
			pgoff += 1UL << order, order = order_at((res), pgoff))
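
/*
 * Worked example (added for illustration; the numbers are hypothetical):
 * for a resource starting at PFN 0x1000 spanning 0x1400 pages,
 * foreach_order_pgoff() visits two chunks:
 *
 *	pgoff 0x0000: mask = 0x1000 | 0x1000 = 0x1000 -> order 12 (0x1000 pages)
 *	pgoff 0x1000: mask = 0x2000 | 0x0400 = 0x2400 -> order 10 (0x0400 pages)
 *	pgoff 0x1400 == nr_pages -> order_at() returns ULONG_MAX, loop ends
 *
 * i.e. the range is covered by the fewest naturally-aligned power-of-2
 * chunks, which is what __radix_tree_insert() requires of a multi-order
 * entry's index/order pair.
 */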
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
int device_private_entry_fault(struct vm_area_struct *vma,
		       unsigned long addr,
		       swp_entry_t entry,
		       unsigned int flags,
		       pmd_t *pmdp)
{
	struct page *page = device_private_entry_to_page(entry);

	/*
	 * The page_fault() callback must migrate the page back to system
	 * memory so that the CPU can access it. This might fail for various
	 * reasons (device issue, device was unsafely unplugged, ...). When
	 * such an error condition happens, the callback must return
	 * VM_FAULT_SIGBUS.
	 *
	 * Note that because memory cgroup charges are accounted to the device
	 * memory, this should never fail because of memory restrictions (but
	 * allocation of a regular system page might still fail because we are
	 * out of memory).
	 *
	 * There is a more in-depth description of what that callback can and
	 * cannot do, in include/linux/memremap.h
	 */
	return page->pgmap->page_fault(vma, addr, page, flags, pmdp);
}
EXPORT_SYMBOL(device_private_entry_fault);
#endif /* CONFIG_DEVICE_PRIVATE */
static void pgmap_radix_release(struct resource *res, unsigned long end_pgoff)
{
	unsigned long pgoff, order;

	mutex_lock(&pgmap_lock);
	foreach_order_pgoff(res, order, pgoff) {
		if (pgoff >= end_pgoff)
			break;
		radix_tree_delete(&pgmap_radix, PHYS_PFN(res->start) + pgoff);
	}
	mutex_unlock(&pgmap_lock);

	synchronize_rcu();
}
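
/*
 * Note (added for exposition): the synchronize_rcu() above pairs with the
 * RCU read-side critical section in get_dev_pagemap(). Once it returns, no
 * concurrent lookup can still be dereferencing a radix entry deleted above,
 * so the pgmap those entries pointed to may be torn down safely.
 */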
static unsigned long pfn_first(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;
	struct vmem_altmap *altmap = &pgmap->altmap;
	unsigned long pfn;

	pfn = res->start >> PAGE_SHIFT;
	if (pgmap->altmap_valid)
		pfn += vmem_altmap_offset(altmap);
	return pfn;
}

static unsigned long pfn_end(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

static unsigned long pfn_next(unsigned long pfn)
{
	if (pfn % 1024 == 0)
		cond_resched();
	return pfn + 1;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))
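
/*
 * Illustrative note (added; the values are hypothetical): with altmap_valid
 * set, pfn_first() skips the leading PFNs claimed by the altmap. E.g. a
 * resource at 0x100000000 (PFN 0x100000) with altmap->reserve +
 * altmap->free == 0x8000 iterates from PFN 0x108000, since the leading
 * PFNs are reserved or used to store the memmap (struct page array)
 * rather than device data.
 */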
static void devm_memremap_pages_release(void *data)
{
	struct dev_pagemap *pgmap = data;
	struct device *dev = pgmap->dev;
	struct resource *res = &pgmap->res;
	resource_size_t align_start, align_size;
	unsigned long pfn;

	for_each_device_pfn(pfn, pgmap)
		put_page(pfn_to_page(pfn));

	if (percpu_ref_tryget_live(pgmap->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(pgmap->ref);
	}

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;

	mem_hotplug_begin();
	arch_remove_memory(align_start, align_size, pgmap->altmap_valid ?
			&pgmap->altmap : NULL);
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
	pgmap_radix_release(res, -1);
	dev_WARN_ONCE(dev, pgmap->altmap.alloc,
		      "%s: failed to free all reserved pages\n", __func__);
}
/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the res, ref and type members of @pgmap must be initialized
 *    by the caller before passing it to this function
 *
 * 2/ The altmap field may optionally be initialized, in which case altmap_valid
 *    must be set to true
 *
 * 3/ pgmap.ref must be 'live' on entry and 'dead' before devm_memunmap_pages()
 *    time (or devm release event). The expected order of events is that ref has
 *    been through percpu_ref_kill() before devm_memremap_pages_release(). The
 *    wait for the completion of all references being dropped and
 *    percpu_ref_exit() must occur after devm_memremap_pages_release().
 *
 * 4/ res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 *
 * A usage sketch follows the function body below.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	resource_size_t align_start, align_size, align_end;
	struct vmem_altmap *altmap = pgmap->altmap_valid ?
			&pgmap->altmap : NULL;
	struct resource *res = &pgmap->res;
	unsigned long pfn, pgoff, order;
	pgprot_t pgprot = PAGE_KERNEL;
	int error, nid, is_ram;

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	is_ram = region_intersects(align_start, align_size,
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, res);
		return ERR_PTR(-ENXIO);
	}

	if (is_ram == REGION_INTERSECTS)
		return __va(res->start);

	if (!pgmap->ref)
		return ERR_PTR(-EINVAL);

	pgmap->dev = dev;

	mutex_lock(&pgmap_lock);
	error = 0;
	align_end = align_start + align_size - 1;

	foreach_order_pgoff(res, order, pgoff) {
		error = __radix_tree_insert(&pgmap_radix,
				PHYS_PFN(res->start) + pgoff, order, pgmap);
		if (error) {
			dev_err(dev, "%s: failed: %d\n", __func__, error);
			break;
		}
	}
	mutex_unlock(&pgmap_lock);
	if (error)
		goto err_radix;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
			align_size);
	if (error)
		goto err_pfn_remap;

	mem_hotplug_begin();
	error = arch_add_memory(nid, align_start, align_size, altmap, false);
	if (!error)
		move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
					align_start >> PAGE_SHIFT,
					align_size >> PAGE_SHIFT, altmap);
	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	for_each_device_pfn(pfn, pgmap) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * ZONE_DEVICE pages union ->lru with a ->pgmap back
		 * pointer. It is a bug if a ZONE_DEVICE page is ever
		 * freed or placed on a driver-private list. Seed the
		 * storage with LIST_POISON* values.
		 */
		list_del(&page->lru);
		page->pgmap = pgmap;
		percpu_ref_get(pgmap->ref);
	}

	/*
	 * devm_add_action() can fail; use the _or_reset variant so that a
	 * registration failure triggers the release path instead of leaking
	 * the mapping.
	 */
	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
			pgmap);
	if (error)
		return ERR_PTR(error);

	return __va(res->start);

 err_add_memory:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
 err_radix:
	pgmap_radix_release(res, pgoff);
	return ERR_PTR(error);
}
EXPORT_SYMBOL(devm_memremap_pages);
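
/*
 * Usage sketch (added for illustration; the names are hypothetical, but the
 * pattern mirrors callers such as the pmem driver):
 *
 *	error = percpu_ref_init(&drv->ref, drv_release, 0, GFP_KERNEL);
 *	if (error)
 *		return error;
 *
 *	drv->pgmap.res = *res;		// host memory range to map
 *	drv->pgmap.ref = &drv->ref;	// must be 'live' on entry
 *	drv->pgmap.type = MEMORY_DEVICE_FS_DAX;
 *	addr = devm_memremap_pages(dev, &drv->pgmap);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *
 * At teardown the caller kills the ref (percpu_ref_kill()), waits for all
 * page references to drain, then calls percpu_ref_exit(), matching note 3/
 * in the kernel-doc above.
 */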
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}
/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to look up
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is. If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference.
	 */
	if (pgmap) {
		if (phys >= pgmap->res.start && phys <= pgmap->res.end)
			return pgmap;
		put_dev_pagemap(pgmap);
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = radix_tree_lookup(&pgmap_radix, PHYS_PFN(phys));
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);
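
/*
 * Example (added for illustration; the caller is hypothetical): walking a
 * range of device PFNs while reusing one cached reference, so the radix
 * lookup only runs when crossing into a different pagemap:
 *
 *	struct dev_pagemap *pgmap = NULL;
 *
 *	for (pfn = start; pfn < end; pfn++) {
 *		pgmap = get_dev_pagemap(pfn, pgmap);
 *		if (!pgmap)
 *			break;		// pfn is not device memory
 *		...
 *	}
 *	if (pgmap)
 *		put_dev_pagemap(pgmap);
 */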
#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL_GPL(devmap_managed_key);
static atomic_t devmap_enable;

/*
 * Toggle the static key for ->page_free() callbacks when dev_pagemap
 * pages go idle.
 */
void dev_pagemap_get_ops(void)
{
	if (atomic_inc_return(&devmap_enable) == 1)
		static_branch_enable(&devmap_managed_key);
}
EXPORT_SYMBOL_GPL(dev_pagemap_get_ops);

void dev_pagemap_put_ops(void)
{
	if (atomic_dec_and_test(&devmap_enable))
		static_branch_disable(&devmap_managed_key);
}
EXPORT_SYMBOL_GPL(dev_pagemap_put_ops);
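
/*
 * Example (added for illustration): a subsystem that depends on the
 * ->page_free() callback takes a reference for the lifetime of its
 * pagemap so the static key stays enabled:
 *
 *	dev_pagemap_get_ops();
 *	addr = devm_memremap_pages(dev, pgmap);
 *	...
 *	dev_pagemap_put_ops();	// on teardown
 */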
void __put_devmap_managed_page(struct page *page)
{
	int count = page_ref_dec_return(page);

	/*
	 * If the refcount is 1 the page is idle: devm_memremap_pages() holds
	 * the remaining reference, so nobody else can be using the page and
	 * the count is stable.
	 */
	if (count == 1) {
		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);
		__ClearPageWaiters(page);

		page->mapping = NULL;
		mem_cgroup_uncharge(page);

		page->pgmap->page_free(page, page->pgmap->data);
	} else if (!count)
		__put_page(page);
}
EXPORT_SYMBOL_GPL(__put_devmap_managed_page);
#endif /* CONFIG_DEV_PAGEMAP_OPS */