dma.c

/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/bug.h>

#define NOT_COHERENT_CACHE
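
/*
 * Note (added for clarity): MicroBlaze caches are not coherent with
 * respect to DMA. With NOT_COHERENT_CACHE defined, coherent buffers
 * come from the uncached consistent_alloc()/consistent_free() pool,
 * while the streaming-mapping paths below rely on explicit
 * __dma_sync() cache maintenance.
 */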

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag,
				       struct dma_attrs *attrs)
{
#ifdef NOT_COHERENT_CACHE
	return consistent_alloc(flag, size, dma_handle);
#else
	void *ret;
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_phys(ret);

	return ret;
#endif
}

static void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle,
				     struct dma_attrs *attrs)
{
#ifdef NOT_COHERENT_CACHE
	consistent_free(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}
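
/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * file): drivers reach the .alloc/.free hooks above through the generic
 * dma_alloc_coherent()/dma_free_coherent() wrappers.
 *
 *	dma_addr_t bus;
 *	void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	(program the device with 'bus', access 'buf' from the CPU)
 *	dma_free_coherent(dev, PAGE_SIZE, buf, bus);
 */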

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg);
		__dma_sync(sg_phys(sg), sg->length, direction);
	}

	return nents;
}
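
/*
 * Usage sketch (hypothetical, not part of this file): .map_sg above is
 * reached via dma_map_sg(). Each entry's dma_address is simply the
 * physical address, because the bus is directly mapped.
 *
 *	struct scatterlist sg[NENTS];	(NENTS is a placeholder)
 *	int count;
 *
 *	sg_init_table(sg, NENTS);
 *	(fill the table with sg_set_page()/sg_set_buf())
 *	count = dma_map_sg(dev, sg, NENTS, DMA_TO_DEVICE);
 *	if (!count)
 *		return -EIO;
 */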

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}

static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction direction,
					     struct dma_attrs *attrs)
{
	__dma_sync(page_to_phys(page) + offset, size, direction);
	return page_to_phys(page) + offset;
}
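
/*
 * Usage sketch (hypothetical): dma_map_single()/dma_map_page() funnel
 * into .map_page above; the cache is synced and the physical address
 * is handed back as the bus address.
 *
 *	dma_addr_t bus;
 *
 *	bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	(start the transfer)
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */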

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	/*
	 * No separate cache cleanup is necessary here. __dma_sync()
	 * takes a physical address, and dma_address is already a
	 * physical address, so no phys_to_virt() conversion is needed.
	 */
	__dma_sync(dma_address, size, direction);
}

static inline void
dma_direct_sync_single_for_cpu(struct device *dev,
			       dma_addr_t dma_handle, size_t size,
			       enum dma_data_direction direction)
{
	/*
	 * It's pointless to flush the cache as the memory segment
	 * is being handed back to the CPU; only invalidate for
	 * DMA_FROM_DEVICE so the CPU sees what the device wrote.
	 */
	if (direction == DMA_FROM_DEVICE)
		__dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_single_for_device(struct device *dev,
				  dma_addr_t dma_handle, size_t size,
				  enum dma_data_direction direction)
{
	/*
	 * It's pointless to invalidate the cache if the device isn't
	 * supposed to write to the relevant region; only flush for
	 * DMA_TO_DEVICE so the device sees the CPU's writes.
	 */
	if (direction == DMA_TO_DEVICE)
		__dma_sync(dma_handle, size, direction);
}
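
/*
 * Ownership sketch (hypothetical): for a long-lived streaming buffer a
 * driver passes ownership back and forth with the sync hooks above.
 *
 *	dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
 *	(CPU reads the data the device wrote)
 *	dma_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
 *	(device may write to the buffer again)
 */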

static inline void
dma_direct_sync_sg_for_cpu(struct device *dev,
			   struct scatterlist *sgl, int nents,
			   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	if (direction == DMA_FROM_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}

static inline void
dma_direct_sync_sg_for_device(struct device *dev,
			      struct scatterlist *sgl, int nents,
			      enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	if (direction == DMA_TO_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}

static
int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
			     void *cpu_addr, dma_addr_t handle, size_t size,
			     struct dma_attrs *attrs)
{
#ifdef CONFIG_MMU
	unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;

	if (off >= count || user_count > (count - off))
		return -ENXIO;

#ifdef NOT_COHERENT_CACHE
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	pfn = consistent_virt_to_pfn(cpu_addr);
#else
	pfn = virt_to_pfn(cpu_addr);
#endif
	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
#else
	return -ENXIO;
#endif
}
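
/*
 * Usage sketch (hypothetical): a driver's .mmap file operation can
 * forward to the hook above via dma_mmap_coherent() to expose a
 * coherent buffer to user space ('foo_dev' is a placeholder type).
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *fd = file->private_data;
 *
 *		return dma_mmap_coherent(fd->dev, vma, fd->buf, fd->bus,
 *					 vma->vm_end - vma->vm_start);
 *	}
 */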

struct dma_map_ops dma_direct_ops = {
	.alloc = dma_direct_alloc_coherent,
	.free = dma_direct_free_coherent,
	.mmap = dma_direct_mmap_coherent,
	.map_sg = dma_direct_map_sg,
	.dma_supported = dma_direct_dma_supported,
	.map_page = dma_direct_map_page,
	.unmap_page = dma_direct_unmap_page,
	.sync_single_for_cpu = dma_direct_sync_single_for_cpu,
	.sync_single_for_device = dma_direct_sync_single_for_device,
	.sync_sg_for_cpu = dma_direct_sync_sg_for_cpu,
	.sync_sg_for_device = dma_direct_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_direct_ops);
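
/*
 * Note (added for clarity): every device on this architecture is
 * expected to use dma_direct_ops; the arch's get_dma_ops() in
 * asm/dma-mapping.h presumably returns &dma_direct_ops, which is why
 * the symbol is exported for modules here.
 */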

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);