dma-contiguous.c

/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */
#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif

#include <asm/page.h>
#include <asm/dma-contiguous.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-isolation.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm_types.h>
#include <linux/dma-contiguous.h>
struct cma {
	unsigned long	base_pfn;
	unsigned long	count;
	unsigned long	*bitmap;
	struct mutex	lock;
};
struct cma *dma_contiguous_default_area;

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif
/*
 * Default global CMA area size can be defined in kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of the global CMA area for their
 * system should use the cma= kernel parameter.
 */
static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline = -1;
static phys_addr_t base_cmdline;
static phys_addr_t limit_cmdline;
static int __init early_cma(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	size_cmdline = memparse(p, &p);
	if (*p != '@')
		return 0;
	base_cmdline = memparse(p + 1, &p);
	if (*p != '-') {
		limit_cmdline = base_cmdline + size_cmdline;
		return 0;
	}
	limit_cmdline = memparse(p + 1, &p);

	return 0;
}
early_param("cma", early_cma);
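
/*
 * Usage sketch (the numbers below are made up for illustration): the
 * parser above accepts "cma=<size>[@<base>[-<limit>]]" on the kernel
 * command line, each number in memparse() notation, e.g.:
 *
 *	cma=64M			64 MiB placed anywhere below the limit
 *	cma=64M@0x20000000	64 MiB fixed at physical 0x20000000
 *	cma=64M@0x20000000-0x40000000
 *				64 MiB placed somewhere inside
 *				[0x20000000, 0x40000000)
 *
 * When only a base is given, limit_cmdline is set to base + size, which
 * dma_contiguous_reserve() below treats as a fixed-placement request.
 */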
#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
	struct memblock_region *reg;
	unsigned long total_pages = 0;

	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		total_pages += memblock_region_memory_end_pfn(reg) -
			       memblock_region_memory_base_pfn(reg);

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
	return 0;
}

#endif
/**
 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t selected_size = 0;
	phys_addr_t selected_base = 0;
	phys_addr_t selected_limit = limit;
	bool fixed = false;

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		selected_size = size_cmdline;
		selected_base = base_cmdline;
		selected_limit = min_not_zero(limit_cmdline, limit);
		if (base_cmdline + size_cmdline == limit_cmdline)
			fixed = true;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		selected_size = max(size_bytes, cma_early_percent_memory());
#endif
	}

	if (selected_size && !dma_contiguous_default_area) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);

		dma_contiguous_reserve_area(selected_size, selected_base,
					    selected_limit,
					    &dma_contiguous_default_area,
					    fixed);
	}
}
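
/*
 * Call-site sketch (arch_memblock_init and arch_dma_limit are
 * hypothetical names standing in for the arch-specific entry point and
 * DMA-addressable limit): architecture early-boot code typically calls
 * this once, after memblock is ready and all other early reservations
 * have been made:
 *
 *	void __init arch_memblock_init(void)
 *	{
 *		...other early reservations...
 *		dma_contiguous_reserve(arch_dma_limit);
 *	}
 */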
static DEFINE_MUTEX(cma_mutex);

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;
		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range() requires all pages in the
			 * range to belong to a single zone; bail out,
			 * freeing the bitmap which would otherwise leak.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone) {
				kfree(cma->bitmap);
				return -EINVAL;
			}
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);
	return 0;
}
static struct cma cma_areas[MAX_CMA_AREAS];
static unsigned cma_area_count;

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);
		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);
/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @res_cma: Pointer to store the created cma region.
 * @fixed: hint about where to place the reserved area
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows creation of custom reserved areas for specific
 * devices.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma,
				       bool fixed)
{
	struct cma *cma = &cma_areas[cma_area_count];
	phys_addr_t alignment;
	int ret = 0;

	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
		 (unsigned long)size, (unsigned long)base,
		 (unsigned long)limit);

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	/* Sanitise input arguments */
	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	/* Reserve memory */
	if (base && fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = memblock_alloc_range(size, alignment, base,
							limit);
		if (!addr) {
			ret = -ENOMEM;
			goto err;
		}
		base = addr;
	}

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	*res_cma = cma;
	cma_area_count++;

	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
		(unsigned long)base);

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(base, size);
	return 0;

err:
	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
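
/*
 * Usage sketch (the device name and size are hypothetical): board or
 * SoC setup code can reserve a private area during early boot and later
 * attach it to a device with the dev_set_cma_area() helper declared in
 * <linux/dma-contiguous.h>:
 *
 *	static struct cma *camera_cma;
 *
 *	void __init board_reserve(void)
 *	{
 *		if (dma_contiguous_reserve_area(SZ_16M, 0, 0,
 *						&camera_cma, false))
 *			pr_warn("camera CMA reservation failed\n");
 *	}
 *
 * and, once the struct device exists:
 *
 *	dev_set_cma_area(&camera_pdev->dev, camera_cma);
 */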
static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
{
	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
	mutex_unlock(&cma->lock);
}
/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev: Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device specific contiguous memory area if available, or the default
 * global one. Requires the architecture specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	unsigned long mask, pfn, pageno, start = 0;
	struct cma *cma = dev_get_cma_area(dev);
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = (1 << align) - 1;

	for (;;) {
		mutex_lock(&cma->lock);
		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, pageno, count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + pageno;
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		} else if (ret != -EBUSY) {
			clear_cma_bitmap(cma, pfn, count);
			break;
		}
		clear_cma_bitmap(cma, pfn, count);
		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
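
/*
 * Usage sketch (a minimal sketch of a DMA mapping backend, not code
 * from this file): allocate "size" bytes as whole pages, aligned to
 * the allocation order, falling back to another allocator on failure:
 *
 *	int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 *	unsigned int order = get_order(size);
 *	struct page *page;
 *
 *	page = dma_alloc_from_contiguous(dev, count, order);
 *	if (!page)
 *		return NULL;	(or fall back to alloc_pages())
 */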
/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev: Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to the contiguous
 * area, and true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	struct cma *cma = dev_get_cma_area(dev);
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	clear_cma_bitmap(cma, pfn, count);

	return true;
}
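
/*
 * Usage sketch, continuing the allocation sketch above: on the free
 * path the same backend hands the pages back, and frees them itself
 * only when they did not come from a CMA area:
 *
 *	if (!dma_release_from_contiguous(dev, page, count))
 *		__free_pages(page, get_order(size));
 */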