dma.c

/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/bug.h>

#define NOT_COHERENT_CACHE
static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag,
				       unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
	return consistent_alloc(flag, size, dma_handle);
#else
	void *ret;
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_phys(ret);

	return ret;
#endif
}

static void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle,
				     unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
	consistent_free(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg);

		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
			   sg->length, direction);
	}

	return nents;
}
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}

static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction direction,
					     unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync(page_to_phys(page) + offset, size, direction);
	return page_to_phys(page) + offset;
}
static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 unsigned long attrs)
{
	/*
	 * No cache cleanup is necessary here.
	 *
	 * dma_address is already a physical address, which is what
	 * __dma_sync() operates on, so no phys_to_virt() conversion
	 * is needed.
	 */
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync(dma_address, size, direction);
}
static inline void
dma_direct_sync_single_for_cpu(struct device *dev,
			       dma_addr_t dma_handle, size_t size,
			       enum dma_data_direction direction)
{
	/*
	 * It's pointless to flush the cache as the memory segment
	 * is given to the CPU
	 */
	if (direction == DMA_FROM_DEVICE)
		__dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_single_for_device(struct device *dev,
				  dma_addr_t dma_handle, size_t size,
				  enum dma_data_direction direction)
{
	/*
	 * It's pointless to invalidate the cache if the device isn't
	 * supposed to write to the relevant region
	 */
	if (direction == DMA_TO_DEVICE)
		__dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_sg_for_cpu(struct device *dev,
			   struct scatterlist *sgl, int nents,
			   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	if (direction == DMA_FROM_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}

static inline void
dma_direct_sync_sg_for_device(struct device *dev,
			      struct scatterlist *sgl, int nents,
			      enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	if (direction == DMA_TO_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}
static
int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
			     void *cpu_addr, dma_addr_t handle, size_t size,
			     unsigned long attrs)
{
#ifdef CONFIG_MMU
	unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;

	if (off >= count || user_count > (count - off))
		return -ENXIO;

#ifdef NOT_COHERENT_CACHE
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	pfn = consistent_virt_to_pfn(cpu_addr);
#else
	pfn = virt_to_pfn(cpu_addr);
#endif
	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
#else
	return -ENXIO;
#endif
}
struct dma_map_ops dma_direct_ops = {
	.alloc			= dma_direct_alloc_coherent,
	.free			= dma_direct_free_coherent,
	.mmap			= dma_direct_mmap_coherent,
	.map_sg			= dma_direct_map_sg,
	.dma_supported		= dma_direct_dma_supported,
	.map_page		= dma_direct_map_page,
	.unmap_page		= dma_direct_unmap_page,
	.sync_single_for_cpu	= dma_direct_sync_single_for_cpu,
	.sync_single_for_device	= dma_direct_sync_single_for_device,
	.sync_sg_for_cpu	= dma_direct_sync_sg_for_cpu,
	.sync_sg_for_device	= dma_direct_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_direct_ops);
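
/*
 * Illustrative note (an assumption, not part of this file): the ops table
 * above is normally wired up by a per-arch hook in <asm/dma-mapping.h>,
 * roughly of this shape, so that the generic DMA API can find it:
 *
 *	static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 *	{
 *		return &dma_direct_ops;
 *	}
 */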

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);
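
/*
 * Illustrative sketch (an assumption, not part of dma.c): how a driver
 * would exercise the callbacks above through the generic DMA API, which
 * dispatches into the arch's struct dma_map_ops.  The function name
 * example_dma_usage() and the parameters dev/len are placeholders.
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>

static int example_dma_usage(struct device *dev, size_t len)
{
	void *coherent, *stream_buf;
	dma_addr_t coherent_handle, stream_handle;

	/* Coherent buffer: ends up in dma_direct_alloc_coherent(). */
	coherent = dma_alloc_coherent(dev, len, &coherent_handle, GFP_KERNEL);
	if (!coherent)
		return -ENOMEM;

	/* Streaming buffer: dma_map_single() ends up in dma_direct_map_page(). */
	stream_buf = kmalloc(len, GFP_KERNEL);
	if (!stream_buf) {
		dma_free_coherent(dev, len, coherent, coherent_handle);
		return -ENOMEM;
	}
	stream_handle = dma_map_single(dev, stream_buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stream_handle)) {
		kfree(stream_buf);
		dma_free_coherent(dev, len, coherent, coherent_handle);
		return -ENOMEM;
	}

	/* ... program the device with stream_handle and wait for it ... */

	/* Hand the buffer back to the CPU: dma_direct_sync_single_for_cpu(). */
	dma_sync_single_for_cpu(dev, stream_handle, len, DMA_FROM_DEVICE);

	dma_unmap_single(dev, stream_handle, len, DMA_FROM_DEVICE);
	kfree(stream_buf);
	dma_free_coherent(dev, len, coherent, coherent_handle);
	return 0;
}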