// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <asm/cacheflush.h>

static void *dma_nommu_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	/* Uncached consistent memory; *dma_handle receives the bus address. */
	return consistent_alloc(flag, size, dma_handle);
}

static void dma_nommu_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
{
	consistent_free(size, vaddr);
}

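/*
 * Illustrative sketch (not part of the original file): drivers reach the
 * two callbacks above through the generic DMA API. The device pointer
 * and size here are assumptions for the example.
 */
static void *example_get_coherent(struct device *dev, size_t size,
		dma_addr_t *handle)
{
	/* Resolves to dma_nommu_alloc_coherent() when dma_nommu_ops is used. */
	return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
}
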
/*
 * Cache maintenance for streaming DMA: data headed to the device must be
 * written back (flushed) from the dcache; data arriving from the device
 * needs a dcache invalidate so the CPU re-reads memory.
 */
static inline void __dma_sync(unsigned long paddr,
		size_t size, enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
	case DMA_BIDIRECTIONAL:
		flush_dcache_range(paddr, paddr + size);
		break;
	case DMA_FROM_DEVICE:
		invalidate_dcache_range(paddr, paddr + size);
		break;
	default:
		BUG();
	}
}

static int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction direction,
		unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME: this code path is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg);

		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		__dma_sync(sg_phys(sg), sg->length, direction);
	}

	return nents;
}

static inline dma_addr_t dma_nommu_map_page(struct device *dev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction direction, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync(page_to_phys(page) + offset, size, direction);
	return page_to_phys(page) + offset;
}

static inline void dma_nommu_unmap_page(struct device *dev,
		dma_addr_t dma_address, size_t size,
		enum dma_data_direction direction, unsigned long attrs)
{
	/*
	 * dma_address is already a physical address, which is exactly what
	 * __dma_sync() expects, so no phys/virt conversion is needed before
	 * the cache maintenance on unmap.
	 */
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync(dma_address, size, direction);
}

static inline void
dma_nommu_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	/*
	 * It's pointless to flush the cache when the memory segment is
	 * handed to the CPU; only data the device may have written needs
	 * invalidating.
	 */
	if (direction == DMA_FROM_DEVICE)
		__dma_sync(dma_handle, size, direction);
}

static inline void
dma_nommu_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	/*
	 * It's pointless to invalidate the cache if the device isn't
	 * supposed to write to the relevant region.
	 */
	if (direction == DMA_TO_DEVICE)
		__dma_sync(dma_handle, size, direction);
}

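/*
 * Illustrative sketch (assumption, not in the original file): the usual
 * pattern for touching a streaming buffer from the CPU between device
 * transfers, which lands in the two sync callbacks above.
 */
static void example_peek_rx_buffer(struct device *dev, dma_addr_t handle,
		void *cpu_addr, size_t len)
{
	/* Invalidate so the CPU sees what the device wrote. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* ... inspect cpu_addr[0..len) here ... */

	/* Hand the buffer back to the device for the next transfer. */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}
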
static inline void
dma_nommu_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME: this code path is untested */
	if (direction == DMA_FROM_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}

static inline void
dma_nommu_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME: this code path is untested */
	if (direction == DMA_TO_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}

static int dma_nommu_mmap_coherent(struct device *dev,
		struct vm_area_struct *vma, void *cpu_addr,
		dma_addr_t handle, size_t size, unsigned long attrs)
{
#ifdef CONFIG_MMU
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;

	if (off >= count || user_count > (count - off))
		return -ENXIO;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	pfn = consistent_virt_to_pfn(cpu_addr);
	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
#else
	return -ENXIO;
#endif
}

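/*
 * Illustrative sketch (assumption, not in the original file): a driver
 * exporting a coherent buffer to userspace from its mmap file operation
 * ends up in dma_nommu_mmap_coherent() above via dma_mmap_coherent().
 */
static int example_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t handle, size_t size)
{
	return dma_mmap_coherent(dev, vma, cpu_addr, handle, size);
}
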
const struct dma_map_ops dma_nommu_ops = {
	.alloc = dma_nommu_alloc_coherent,
	.free = dma_nommu_free_coherent,
	.mmap = dma_nommu_mmap_coherent,
	.map_sg = dma_nommu_map_sg,
	.map_page = dma_nommu_map_page,
	.unmap_page = dma_nommu_unmap_page,
	.sync_single_for_cpu = dma_nommu_sync_single_for_cpu,
	.sync_single_for_device = dma_nommu_sync_single_for_device,
	.sync_sg_for_cpu = dma_nommu_sync_sg_for_cpu,
	.sync_sg_for_device = dma_nommu_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_nommu_ops);

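/*
 * Illustrative sketch (assumption, not in the original file): a minimal
 * streaming mapping as a driver would issue it; with dma_nommu_ops
 * installed for the device, this resolves to dma_nommu_map_page() and
 * dma_nommu_unmap_page() above.
 */
static int example_send_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Flushes the dcache via __dma_sync() before the device reads. */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... kick off the device transfer using 'handle' ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
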
/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);