/*
 * drivers/base/dma-mapping.c - arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 *
 * This file is released under the GPLv2.
 */

#include <linux/acpi.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Managed DMA API
 */
struct dma_devres {
        size_t          size;
        void            *vaddr;
        dma_addr_t      dma_handle;
};

static void dmam_coherent_release(struct device *dev, void *res)
{
        struct dma_devres *this = res;

        dma_free_coherent(dev, this->size, this->vaddr, this->dma_handle);
}

static void dmam_noncoherent_release(struct device *dev, void *res)
{
        struct dma_devres *this = res;

        dma_free_noncoherent(dev, this->size, this->vaddr, this->dma_handle);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
        struct dma_devres *this = res, *match = match_data;

        if (this->vaddr == match->vaddr) {
                WARN_ON(this->size != match->size ||
                        this->dma_handle != match->dma_handle);
                return 1;
        }
        return 0;
}

/**
 * dmam_alloc_coherent - Managed dma_alloc_coherent()
 * @dev: Device to allocate coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 *
 * Managed dma_alloc_coherent().  Memory allocated using this function
 * will be automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_coherent(struct device *dev, size_t size,
                          dma_addr_t *dma_handle, gfp_t gfp)
{
        struct dma_devres *dr;
        void *vaddr;

        dr = devres_alloc(dmam_coherent_release, sizeof(*dr), gfp);
        if (!dr)
                return NULL;

        vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp);
        if (!vaddr) {
                devres_free(dr);
                return NULL;
        }

        dr->vaddr = vaddr;
        dr->dma_handle = *dma_handle;
        dr->size = size;

        devres_add(dev, dr);

        return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_coherent);
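
/*
 * Example (not part of this file): a minimal sketch of how a hypothetical
 * driver would use dmam_alloc_coherent() from its probe routine.  The
 * driver name, field names and 4 KiB size below are illustrative
 * assumptions; the point is that no explicit free is needed on the error
 * and detach paths, because devres releases the buffer automatically.
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              dma_addr_t ring_dma;
 *              void *ring;
 *
 *              ring = dmam_alloc_coherent(&pdev->dev, SZ_4K, &ring_dma,
 *                                         GFP_KERNEL);
 *              if (!ring)
 *                      return -ENOMEM;
 *
 *              (program ring_dma into the device; the CPU uses ring directly)
 *              return 0;
 *      }
 */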

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
                        dma_addr_t dma_handle)
{
        struct dma_devres match_data = { size, vaddr, dma_handle };

        dma_free_coherent(dev, size, vaddr, dma_handle);
        WARN_ON(devres_destroy(dev, dmam_coherent_release, dmam_match,
                               &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_noncoherent - Managed dma_alloc_noncoherent()
 * @dev: Device to allocate non-coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 *
 * Managed dma_alloc_noncoherent().  Memory allocated using this
 * function will be automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_noncoherent(struct device *dev, size_t size,
                             dma_addr_t *dma_handle, gfp_t gfp)
{
        struct dma_devres *dr;
        void *vaddr;

        dr = devres_alloc(dmam_noncoherent_release, sizeof(*dr), gfp);
        if (!dr)
                return NULL;

        vaddr = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
        if (!vaddr) {
                devres_free(dr);
                return NULL;
        }

        dr->vaddr = vaddr;
        dr->dma_handle = *dma_handle;
        dr->size = size;

        devres_add(dev, dr);

        return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_noncoherent);
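
/*
 * Example (not part of this file): a hypothetical sketch of using the
 * managed non-coherent allocator.  Names and the size are illustrative;
 * since the memory is not cache-coherent, the CPU/device hand-off needs
 * explicit maintenance, shown here with dma_cache_sync(), the interface
 * contemporary with this code:
 *
 *      dma_addr_t buf_dma;
 *      void *buf;
 *
 *      buf = dmam_alloc_noncoherent(dev, SZ_4K, &buf_dma, GFP_KERNEL);
 *      if (!buf)
 *              return -ENOMEM;
 *
 *      (CPU fills buf ...)
 *      dma_cache_sync(dev, buf, SZ_4K, DMA_TO_DEVICE);
 *      (kick the device with buf_dma; no explicit free needed on detach)
 */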

/**
 * dmam_free_noncoherent - Managed dma_free_noncoherent()
 * @dev: Device to free noncoherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_noncoherent().
 */
void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
                           dma_addr_t dma_handle)
{
        struct dma_devres match_data = { size, vaddr, dma_handle };

        dma_free_noncoherent(dev, size, vaddr, dma_handle);
        /*
         * devres_destroy() returns 0 on success, so warn when it fails,
         * matching dmam_free_coherent() above.
         */
        WARN_ON(devres_destroy(dev, dmam_noncoherent_release, dmam_match,
                               &match_data));
}
EXPORT_SYMBOL(dmam_free_noncoherent);

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT

static void dmam_coherent_decl_release(struct device *dev, void *res)
{
        dma_release_declared_memory(dev);
}

/**
 * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
 * @dev: Device to declare coherent memory for
 * @phys_addr: Physical address of coherent memory to be declared
 * @device_addr: Device address of coherent memory to be declared
 * @size: Size of coherent memory to be declared
 * @flags: Flags
 *
 * Managed dma_declare_coherent_memory().
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                                 dma_addr_t device_addr, size_t size, int flags)
{
        void *res;
        int rc;

        res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL);
        if (!res)
                return -ENOMEM;

        /*
         * dma_declare_coherent_memory() returns the DMA_MEMORY_* flags it
         * honored on success and 0 on failure, so a nonzero return means
         * success here.
         */
        rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
                                         flags);
        if (rc) {
                devres_add(dev, res);
                rc = 0;
        } else {
                devres_free(res);
                rc = -ENOMEM;
        }

        return rc;
}
EXPORT_SYMBOL(dmam_declare_coherent_memory);
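
/*
 * Example (not part of this file): a hypothetical driver with device-local
 * SRAM could route its coherent allocations through that region.  The
 * resource variable and the flag combination below are illustrative
 * assumptions:
 *
 *      ret = dmam_declare_coherent_memory(&pdev->dev, res->start,
 *                                         res->start, resource_size(res),
 *                                         DMA_MEMORY_MAP |
 *                                         DMA_MEMORY_EXCLUSIVE);
 *      if (ret)
 *              return ret;
 *      (subsequent dma_alloc_coherent() calls are served from the SRAM,
 *       and the declaration is undone automatically on driver detach)
 */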

/**
 * dmam_release_declared_memory - Managed dma_release_declared_memory()
 * @dev: Device to release declared coherent memory for
 *
 * Managed dma_release_declared_memory().
 */
void dmam_release_declared_memory(struct device *dev)
{
        WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
}
EXPORT_SYMBOL(dmam_release_declared_memory);

#endif

/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
                           void *cpu_addr, dma_addr_t handle, size_t size)
{
        struct page *page = virt_to_page(cpu_addr);
        int ret;

        ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
        if (unlikely(ret))
                return ret;

        sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
        return 0;
}
EXPORT_SYMBOL(dma_common_get_sgtable);
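
/*
 * Example (not part of this file): drivers normally reach this helper
 * through dma_get_sgtable().  A minimal, hypothetical sketch of exporting
 * an already allocated coherent buffer as a one-entry sg_table:
 *
 *      struct sg_table sgt;
 *      int ret;
 *
 *      ret = dma_get_sgtable(dev, &sgt, cpu_addr, dma_handle, size);
 *      if (ret)
 *              return ret;
 *      (hand sgt.sgl to the consumer, e.g. a dma-buf exporter)
 *      sg_free_table(&sgt);
 */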

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
        int ret = -ENXIO;
#if defined(CONFIG_MMU) && !defined(CONFIG_ARCH_NO_COHERENT_DMA_MMAP)
        unsigned long user_count = vma_pages(vma);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
        unsigned long off = vma->vm_pgoff;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (off < count && user_count <= (count - off)) {
                ret = remap_pfn_range(vma, vma->vm_start,
                                      pfn + off,
                                      user_count << PAGE_SHIFT,
                                      vma->vm_page_prot);
        }
#endif  /* CONFIG_MMU && !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */

        return ret;
}
EXPORT_SYMBOL(dma_common_mmap);
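
/*
 * Example (not part of this file): drivers reach this helper through
 * dma_mmap_coherent()/dma_mmap_attrs().  A minimal, hypothetical .mmap
 * file operation exporting a coherent buffer to userspace; the foo_dev
 * structure and its fields are illustrative assumptions:
 *
 *      static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              struct foo_dev *fd = file->private_data;
 *
 *              return dma_mmap_coherent(fd->dev, vma, fd->cpu_addr,
 *                                       fd->dma_handle, fd->size);
 *      }
 */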

#ifdef CONFIG_MMU

/*
 * remaps an array of PAGE_SIZE pages into another vm_area
 * Cannot be used in non-sleeping contexts
 */
void *dma_common_pages_remap(struct page **pages, size_t size,
                             unsigned long vm_flags, pgprot_t prot,
                             const void *caller)
{
        struct vm_struct *area;

        area = get_vm_area_caller(size, vm_flags, caller);
        if (!area)
                return NULL;

        area->pages = pages;

        if (map_vm_area(area, prot, pages)) {
                vunmap(area->addr);
                return NULL;
        }

        return area->addr;
}

/*
 * remaps an allocated contiguous region into another vm_area.
 * Cannot be used in non-sleeping contexts
 */
void *dma_common_contiguous_remap(struct page *page, size_t size,
                                  unsigned long vm_flags,
                                  pgprot_t prot, const void *caller)
{
        int i;
        struct page **pages;
        void *ptr;

        pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
        if (!pages)
                return NULL;

        for (i = 0; i < (size >> PAGE_SHIFT); i++)
                pages[i] = nth_page(page, i);

        ptr = dma_common_pages_remap(pages, size, vm_flags, prot, caller);

        kfree(pages);

        return ptr;
}
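
/*
 * Example (not part of this file): a sketch of how an architecture's
 * coherent allocator might use the helper above to give a physically
 * contiguous allocation a write-combining kernel mapping.  The allocation
 * strategy and flag choice are illustrative assumptions:
 *
 *      struct page *page = alloc_pages(GFP_KERNEL, get_order(size));
 *      void *vaddr;
 *
 *      if (!page)
 *              return NULL;
 *      vaddr = dma_common_contiguous_remap(page, size, VM_USERMAP,
 *                                          pgprot_writecombine(PAGE_KERNEL),
 *                                          __builtin_return_address(0));
 */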

/*
 * unmaps a range previously mapped by dma_common_*_remap
 */
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
{
        struct vm_struct *area = find_vm_area(cpu_addr);

        if (!area || (area->flags & vm_flags) != vm_flags) {
                WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
                return;
        }

        unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
        vunmap(cpu_addr);
}
#endif

/*
 * Common configuration to enable DMA API use for a device
 */
int dma_configure(struct device *dev)
{
        struct device *bridge = NULL, *dma_dev = dev;
        enum dev_dma_attr attr;
        int ret = 0;

        if (dev_is_pci(dev)) {
                bridge = pci_get_host_bridge_device(to_pci_dev(dev));
                dma_dev = bridge;
                if (IS_ENABLED(CONFIG_OF) && dma_dev->parent &&
                    dma_dev->parent->of_node)
                        dma_dev = dma_dev->parent;
        }

        if (dma_dev->of_node) {
                ret = of_dma_configure(dev, dma_dev->of_node);
        } else if (has_acpi_companion(dma_dev)) {
                attr = acpi_get_dma_attr(to_acpi_device_node(dma_dev->fwnode));
                if (attr != DEV_DMA_NOT_SUPPORTED)
                        ret = acpi_dma_configure(dev, attr);
        }

        if (bridge)
                pci_put_host_bridge_device(bridge);

        return ret;
}

void dma_deconfigure(struct device *dev)
{
        of_dma_deconfigure(dev);
        acpi_dma_deconfigure(dev);
}