dma-mapping-nommu.c

/*
 * Based on linux/arch/arm/mm/dma-mapping.c
 *
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>

#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <asm/cp15.h>

#include "dma.h"

/*
 * dma_direct_ops is used if
 *  - MMU/MPU is off
 *  - cpu is v7m w/o cache support
 *  - device is coherent
 * otherwise arm_nommu_dma_ops is used.
 *
 * arm_nommu_dma_ops relies on consistent DMA memory (please refer to
 * [1] on how to declare such memory).
 *
 * [1] Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
 */

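/*
 * Illustrative only, not part of the original file: [1] is where the
 * consistent pool is declared. A minimal sketch of such a reserved-memory
 * node, with made-up address, size and label (the exact properties are
 * governed by the binding document, not by this file):
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		coherent_pool: dma-pool@60000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x60000000 0x100000>;
 *			no-map;
 *			linux,dma-default;
 *		};
 *	};
 *
 * dma_alloc_from_global_coherent() below hands out memory from the pool
 * marked as the default ("linux,dma-default") global coherent area.
 */
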
static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp,
				 unsigned long attrs)
{
	void *ret;

	/*
	 * Try the generic allocator first if the caller has advertised
	 * that consistency is not required.
	 */

	if (attrs & DMA_ATTR_NON_CONSISTENT)
		return dma_direct_alloc_pages(dev, size, dma_handle, gfp,
				attrs);

	ret = dma_alloc_from_global_coherent(size, dma_handle);

	/*
	 * dma_alloc_from_global_coherent() may fail because:
	 *
	 * - no consistent DMA region has been defined, so we can't
	 *   continue.
	 * - there is no space left in the consistent DMA region; falling
	 *   back to the generic allocator is only possible when the caller
	 *   has advertised that consistency is not required (handled above).
	 */

	WARN_ON_ONCE(ret == NULL);
	return ret;
}

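/*
 * Illustrative usage, not part of the original file: drivers do not call
 * arm_nommu_dma_alloc() directly; they use the generic DMA API, which
 * dispatches here through the ops installed by arch_setup_dma_ops().
 * A minimal sketch, assuming "dev" is a probed device using these ops:
 *
 *	dma_addr_t dma;
 *	void *buf;
 *
 *	-- coherent buffer, carved out of the global consistent pool:
 *	buf = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
 *
 *	-- non-consistent buffer, caller is responsible for dma_sync_*():
 *	buf = dma_alloc_attrs(dev, SZ_4K, &dma, GFP_KERNEL,
 *			      DMA_ATTR_NON_CONSISTENT);
 */
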
static void arm_nommu_dma_free(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr,
			       unsigned long attrs)
{
	if (attrs & DMA_ATTR_NON_CONSISTENT) {
		dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
	} else {
		int ret = dma_release_from_global_coherent(get_order(size),
							   cpu_addr);

		WARN_ON_ONCE(ret == 0);
	}
}

static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
			      unsigned long attrs)
{
	int ret;

	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
		return ret;

	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

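/*
 * Illustrative usage, not part of the original file: this hook backs
 * dma_mmap_coherent()/dma_mmap_attrs(), e.g. a driver exposing a coherent
 * buffer to user space from its own mmap() file operation:
 *
 *	return dma_mmap_coherent(dev, vma, cpu_addr, dma_addr, size);
 */
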
static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
				  enum dma_data_direction dir)
{
	dmac_map_area(__va(paddr), size, dir);

	if (dir == DMA_FROM_DEVICE)
		outer_inv_range(paddr, paddr + size);
	else
		outer_clean_range(paddr, paddr + size);
}

static void __dma_page_dev_to_cpu(phys_addr_t paddr, size_t size,
				  enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range(paddr, paddr + size);
		dmac_unmap_area(__va(paddr), size, dir);
	}
}

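/*
 * Note added for clarity, not part of the original file: the two helpers
 * above implement the usual buffer-ownership handover for streaming DMA.
 * __dma_page_cpu_to_dev() runs before the device touches the buffer
 * (inner-cache maintenance via dmac_map_area() plus the matching outer-cache
 * clean or invalidate); __dma_page_dev_to_cpu() runs after the device is done
 * and invalidates again for DMA_FROM_DEVICE/DMA_BIDIRECTIONAL so the CPU does
 * not read stale cache lines. Because there is no MMU, the kernel runs with a
 * 1:1 mapping, so __va(paddr) is valid and the DMA handle used below is
 * simply the physical address.
 */
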
static dma_addr_t arm_nommu_dma_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir,
					 unsigned long attrs)
{
	dma_addr_t handle = page_to_phys(page) + offset;

	__dma_page_cpu_to_dev(handle, size, dir);

	return handle;
}

static void arm_nommu_dma_unmap_page(struct device *dev, dma_addr_t handle,
				     size_t size, enum dma_data_direction dir,
				     unsigned long attrs)
{
	__dma_page_dev_to_cpu(handle, size, dir);
}

static int arm_nommu_dma_map_sg(struct device *dev, struct scatterlist *sgl,
				int nents, enum dma_data_direction dir,
				unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
		__dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
	}

	return nents;
}

static void arm_nommu_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
}

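/*
 * Illustrative usage, not part of the original file: the scatterlist hooks
 * above back the usual streaming-DMA pattern. A minimal sketch, assuming a
 * table "sgt" already populated by the driver:
 *
 *	int count = dma_map_sg(dev, sgt.sgl, sgt.nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	... program the device with sg_dma_address()/sg_dma_len() ...
 *	dma_unmap_sg(dev, sgt.sgl, sgt.nents, DMA_TO_DEVICE);
 */
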
static void arm_nommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(handle, size, dir);
}

static void arm_nommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(handle, size, dir);
}

static void arm_nommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
					     int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
}

static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
					  int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
}

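/*
 * Illustrative usage, not part of the original file: the sync hooks are what
 * a driver reaches through dma_sync_single_for_cpu()/..._for_device() when it
 * wants to reuse a streaming buffer without unmapping it, e.g.:
 *
 *	dma_sync_single_for_cpu(dev, handle, size, DMA_FROM_DEVICE);
 *	... CPU inspects the data the device just wrote ...
 *	dma_sync_single_for_device(dev, handle, size, DMA_FROM_DEVICE);
 */
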
const struct dma_map_ops arm_nommu_dma_ops = {
	.alloc			= arm_nommu_dma_alloc,
	.free			= arm_nommu_dma_free,
	.mmap			= arm_nommu_dma_mmap,
	.map_page		= arm_nommu_dma_map_page,
	.unmap_page		= arm_nommu_dma_unmap_page,
	.map_sg			= arm_nommu_dma_map_sg,
	.unmap_sg		= arm_nommu_dma_unmap_sg,
	.sync_single_for_device	= arm_nommu_dma_sync_single_for_device,
	.sync_single_for_cpu	= arm_nommu_dma_sync_single_for_cpu,
	.sync_sg_for_device	= arm_nommu_dma_sync_sg_for_device,
	.sync_sg_for_cpu	= arm_nommu_dma_sync_sg_for_cpu,
};
EXPORT_SYMBOL(arm_nommu_dma_ops);

static const struct dma_map_ops *arm_nommu_get_dma_map_ops(bool coherent)
{
	return coherent ? &dma_direct_ops : &arm_nommu_dma_ops;
}

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	const struct dma_map_ops *dma_ops;

	if (IS_ENABLED(CONFIG_CPU_V7M)) {
		/*
		 * Cache support for v7m is optional, so the CPU can be treated
		 * as coherent if no cache has been detected. Note that it is
		 * not enough to check whether the MPU is in use, since the
		 * system memory map is used in the absence of an MPU.
		 */
		dev->archdata.dma_coherent = (cacheid) ? coherent : true;
	} else {
		/*
		 * Assume coherent DMA in case MMU/MPU has not been set up.
		 */
		dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true;
	}

	dma_ops = arm_nommu_get_dma_map_ops(dev->archdata.dma_coherent);

	set_dma_ops(dev, dma_ops);
}
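
/*
 * Note added for clarity, not part of the original file: arch_setup_dma_ops()
 * is invoked by the generic device/OF code while a device is being configured
 * for DMA (e.g. of_dma_configure() at probe time), with "coherent" reflecting
 * the firmware description; on devicetree systems that is typically the
 * "dma-coherent" property on the device node.
 */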