dma-mapping.h

/*
 * DMA Mapping glue for ARC
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef ASM_ARC_DMA_MAPPING_H
#define ASM_ARC_DMA_MAPPING_H

#include <asm-generic/dma-coherent.h>
#include <asm/cacheflush.h>
#include <plat/dma_addr.h>

void *dma_alloc_noncoherent(struct device *dev, size_t size,
                            dma_addr_t *dma_handle, gfp_t gfp);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
                          dma_addr_t dma_handle);

void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t gfp);

void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
                       dma_addr_t dma_handle);

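/*
 * Illustrative sketch only (not part of this header): a driver would
 * typically allocate a coherent buffer at init and free it at teardown.
 * The names @buf and @handle below are hypothetical.
 *
 *      void *buf;
 *      dma_addr_t handle;
 *
 *      buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *      if (!buf)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(dev, PAGE_SIZE, buf, handle);
 */
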
/* drivers/base/dma-mapping.c */
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                           void *cpu_addr, dma_addr_t dma_addr, size_t size);
extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
                                  void *cpu_addr, dma_addr_t dma_addr,
                                  size_t size);

#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)

/*
 * Streaming DMA Mapping API...
 * The CPU accesses the page via its normal paddr, so the cache must be
 * explicitly made consistent before each use.
 */

static inline void __inline_dma_cache_sync(unsigned long paddr, size_t size,
                                           enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_FROM_DEVICE:
                dma_cache_inv(paddr, size);
                break;
        case DMA_TO_DEVICE:
                dma_cache_wback(paddr, size);
                break;
        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv(paddr, size);
                break;
        default:
                pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
        }
}

void __arc_dma_cache_sync(unsigned long paddr, size_t size,
                          enum dma_data_direction dir);

/*
 * Deliberately no trailing semicolon after "while (0)", so that call
 * sites behave like ordinary statements inside if/else arms.
 */
#define _dma_cache_sync(addr, sz, dir)                          \
do {                                                            \
        if (__builtin_constant_p(dir))                          \
                __inline_dma_cache_sync(addr, sz, dir);         \
        else                                                    \
                __arc_dma_cache_sync(addr, sz, dir);            \
} while (0)

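/*
 * Dispatch note: when @dir is a compile-time constant, the compiler can
 * fold the switch in __inline_dma_cache_sync() down to a single cache op,
 * so e.g.
 *
 *      _dma_cache_sync(paddr, len, DMA_TO_DEVICE);
 *
 * should reduce to just the dma_cache_wback() path.  A runtime @dir
 * instead calls the out-of-line __arc_dma_cache_sync(), avoiding a full
 * switch expansion at every call site.
 */
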
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
               enum dma_data_direction dir)
{
        _dma_cache_sync((unsigned long)cpu_addr, size, dir);
        return plat_kernel_addr_to_dma(dev, cpu_addr);
}

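/*
 * Illustrative sketch only (hypothetical @buf, @len, @bus): streaming-map
 * a CPU-filled buffer for a device read, then unmap when DMA completes.
 *
 *      dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      ... hand @bus to the device, wait for completion ...
 *      dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */
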
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
                 size_t size, enum dma_data_direction dir)
{
        /* nothing to do: unmap is a no-op on this platform */
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
             unsigned long offset, size_t size,
             enum dma_data_direction dir)
{
        unsigned long paddr = page_to_phys(page) + offset;
        return dma_map_single(dev, (void *)paddr, size, dir);
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
               size_t size, enum dma_data_direction dir)
{
        /* nothing to do: see dma_unmap_single() */
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg,
           int nents, enum dma_data_direction dir)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i)
                s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
                                              s->length, dir);

        return nents;
}

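/*
 * Illustrative sketch only: map an @nents-entry scatterlist for device
 * writes and feed the per-entry bus addresses to the hardware.
 * program_desc() is a hypothetical stand-in for whatever fills the
 * device's descriptor ring.
 *
 *      struct scatterlist *s;
 *      int i, count;
 *
 *      count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *      for_each_sg(sgl, s, count, i)
 *              program_desc(hw, i, sg_dma_address(s), sg_dma_len(s));
 *      ...
 *      dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */
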
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg,
             int nents, enum dma_data_direction dir)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i)
                dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
}

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                        size_t size, enum dma_data_direction dir)
{
        _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
                        DMA_FROM_DEVICE);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
                           size_t size, enum dma_data_direction dir)
{
        _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
                        DMA_TO_DEVICE);
}

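/*
 * Note that this port ignores @dir for the single syncs: _for_cpu always
 * invalidates and _for_device always writes back.  A long-lived mapping
 * that ping-pongs between CPU and device would sync at each handoff
 * (hypothetical @bus, @len):
 *
 *      dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
 *      ... CPU reads/updates the buffer ...
 *      dma_sync_single_for_device(dev, bus, len, DMA_TO_DEVICE);
 *      ... device owns the buffer again ...
 */
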
static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              enum dma_data_direction direction)
{
        _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
                        size, DMA_FROM_DEVICE);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction direction)
{
        _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
                        size, DMA_TO_DEVICE);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
                    enum dma_data_direction dir)
{
        int i;

        for (i = 0; i < nelems; i++, sg++)
                _dma_cache_sync((unsigned long)sg_virt(sg), sg->length, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
                       enum dma_data_direction dir)
{
        int i;

        for (i = 0; i < nelems; i++, sg++)
                _dma_cache_sync((unsigned long)sg_virt(sg), sg->length, dir);
}

static inline int dma_supported(struct device *dev, u64 dma_mask)
{
        /* Support 32 bit DMA mask exclusively */
        return dma_mask == DMA_BIT_MASK(32);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        /* mappings can't fail here, so there is no error cookie to check */
        return 0;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;

        *dev->dma_mask = dma_mask;

        return 0;
}

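/*
 * Since dma_supported() accepts only the full 32-bit mask, a driver's
 * probe() would simply do (hypothetical @pdev):
 *
 *      if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *              return -EIO;
 */
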
#endif /* ASM_ARC_DMA_MAPPING_H */