dma.c

/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others. All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * DMA mapping callbacks...
 * As alloc_coherent is the only DMA callback being used currently, that's
 * the only thing implemented properly. The rest need looking into...
 */

#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/export.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>

static int
page_set_nocache(pte_t *pte, unsigned long addr,
                 unsigned long next, struct mm_walk *walk)
{
        unsigned long cl;

        pte_val(*pte) |= _PAGE_CI;

        /*
         * Flush the page out of the TLB so that the new page flags get
         * picked up next time there's an access
         */
        flush_tlb_page(NULL, addr);

        /* Flush page out of dcache */
        for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo.dcache_block_size)
                mtspr(SPR_DCBFR, cl);

        return 0;
}
static int
page_clear_nocache(pte_t *pte, unsigned long addr,
                   unsigned long next, struct mm_walk *walk)
{
        pte_val(*pte) &= ~_PAGE_CI;

        /*
         * Flush the page out of the TLB so that the new page flags get
         * picked up next time there's an access
         */
        flush_tlb_page(NULL, addr);

        return 0;
}
/*
 * Alloc "coherent" memory, which for OpenRISC means simply uncached.
 *
 * This function effectively just calls alloc_pages_exact, sets the
 * cache-inhibit bit on those pages, and makes sure that the pages are
 * flushed out of the cache before they are used.
 *
 * If the NON_CONSISTENT attribute is set, then this function just
 * returns "normal", cacheable memory.
 *
 * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
 * into consideration here, too. All currently known implementations of
 * the OR1K support only strongly ordered memory accesses, so that flag
 * is being ignored for now; uncached but write-combined memory is a
 * missing feature of the OR1K.
 */
static void *
or1k_dma_alloc(struct device *dev, size_t size,
               dma_addr_t *dma_handle, gfp_t gfp,
               unsigned long attrs)
{
        unsigned long va;
        void *page;
        struct mm_walk walk = {
                .pte_entry = page_set_nocache,
                .mm = &init_mm
        };

        page = alloc_pages_exact(size, gfp);
        if (!page)
                return NULL;

        /* This gives us the real physical address of the first page. */
        *dma_handle = __pa(page);

        va = (unsigned long)page;

        if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
                /*
                 * We need to iterate through the pages, clearing the dcache
                 * for them and setting the cache-inhibit bit.
                 */
                if (walk_page_range(va, va + size, &walk)) {
                        free_pages_exact(page, size);
                        return NULL;
                }
        }

        return (void *)va;
}
static void
or1k_dma_free(struct device *dev, size_t size, void *vaddr,
              dma_addr_t dma_handle, unsigned long attrs)
{
        unsigned long va = (unsigned long)vaddr;
        struct mm_walk walk = {
                .pte_entry = page_clear_nocache,
                .mm = &init_mm
        };

        if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
                /* walk_page_range shouldn't be able to fail here */
                WARN_ON(walk_page_range(va, va + size, &walk));
        }

        free_pages_exact(vaddr, size);
}
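
/*
 * Example usage (added for illustration; not part of the original
 * file): a minimal sketch of how a driver would exercise the
 * alloc/free pair above through the generic DMA API. The function
 * names here are hypothetical.
 */
static void *example_get_coherent_buffer(struct device *dev, size_t size,
                                         dma_addr_t *handle)
{
        /*
         * dma_alloc_coherent() dispatches to or1k_dma_alloc() through
         * the dma_map_ops table below. On OpenRISC the returned buffer
         * is uncached (_PAGE_CI set), so CPU and device see the same
         * data without explicit sync calls.
         */
        return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
}

static void example_put_coherent_buffer(struct device *dev, size_t size,
                                        void *vaddr, dma_addr_t handle)
{
        /* Clears _PAGE_CI again and returns the pages to the allocator. */
        dma_free_coherent(dev, size, vaddr, handle);
}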
static dma_addr_t
or1k_map_page(struct device *dev, struct page *page,
              unsigned long offset, size_t size,
              enum dma_data_direction dir,
              unsigned long attrs)
{
        unsigned long cl;
        dma_addr_t addr = page_to_phys(page) + offset;

        if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                return addr;

        switch (dir) {
        case DMA_TO_DEVICE:
                /* Flush the dcache for the requested range */
                for (cl = addr; cl < addr + size;
                     cl += cpuinfo.dcache_block_size)
                        mtspr(SPR_DCBFR, cl);
                break;
        case DMA_FROM_DEVICE:
                /* Invalidate the dcache for the requested range */
                for (cl = addr; cl < addr + size;
                     cl += cpuinfo.dcache_block_size)
                        mtspr(SPR_DCBIR, cl);
                break;
        default:
                /*
                 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
                 * flush nor invalidate the cache here as the area will need
                 * to be manually synced anyway.
                 */
                break;
        }

        return addr;
}
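
/*
 * Example usage (illustrative; not part of the original file):
 * streaming DMA in the transmit direction. dma_map_single() resolves
 * to or1k_map_page() above, which flushes the dcache over the buffer
 * so the device observes the CPU's latest writes. The function name
 * is hypothetical.
 */
static dma_addr_t example_map_tx_buffer(struct device *dev, void *buf,
                                        size_t len)
{
        dma_addr_t addr;

        addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, addr))
                return 0; /* this sketch treats 0 as "mapping failed" */

        return addr;
}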
static void
or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
                size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        /* Nothing special to do here... */
}

static int
or1k_map_sg(struct device *dev, struct scatterlist *sg,
            int nents, enum dma_data_direction dir,
            unsigned long attrs)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                s->dma_address = or1k_map_page(dev, sg_page(s), s->offset,
                                               s->length, dir, 0);
        }

        return nents;
}

static void
or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
              int nents, enum dma_data_direction dir,
              unsigned long attrs)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, 0);
        }
}
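
/*
 * Example usage (illustrative; not part of the original file): mapping
 * a scatterlist. dma_map_sg() lands in or1k_map_sg() above, which maps
 * each entry individually and never coalesces, so the returned count
 * always equals nents here. The function name is hypothetical.
 */
static int example_map_sg_for_device(struct device *dev,
                                     struct scatterlist *sgl, int nents)
{
        int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

        if (mapped == 0)
                return -ENOMEM;

        /* ... program the device using sg_dma_address()/sg_dma_len() ... */

        dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
        return 0;
}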
static void
or1k_sync_single_for_cpu(struct device *dev,
                         dma_addr_t dma_handle, size_t size,
                         enum dma_data_direction dir)
{
        unsigned long cl;
        dma_addr_t addr = dma_handle;

        /* Invalidate the dcache for the requested range */
        for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
                mtspr(SPR_DCBIR, cl);
}

static void
or1k_sync_single_for_device(struct device *dev,
                            dma_addr_t dma_handle, size_t size,
                            enum dma_data_direction dir)
{
        unsigned long cl;
        dma_addr_t addr = dma_handle;

        /* Flush the dcache for the requested range */
        for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
                mtspr(SPR_DCBFR, cl);
}
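
/*
 * Example usage (illustrative; not part of the original file): reusing
 * one streaming mapping for a buffer the device both reads and writes.
 * The two sync calls below land in the or1k_sync_single_* helpers
 * above: _for_device flushes the dcache before the device reads,
 * _for_cpu invalidates it before the CPU reads what the device wrote.
 * The function name is hypothetical.
 */
static void example_sync_round_trip(struct device *dev, dma_addr_t addr,
                                    size_t len)
{
        /* CPU filled the buffer; push the dirty lines out of the dcache. */
        dma_sync_single_for_device(dev, addr, len, DMA_TO_DEVICE);

        /* ... device DMA runs here ... */

        /* Device wrote results; drop stale dcache lines before reading. */
        dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
}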
struct dma_map_ops or1k_dma_map_ops = {
        .alloc = or1k_dma_alloc,
        .free = or1k_dma_free,
        .map_page = or1k_map_page,
        .unmap_page = or1k_unmap_page,
        .map_sg = or1k_map_sg,
        .unmap_sg = or1k_unmap_sg,
        .sync_single_for_cpu = or1k_sync_single_for_cpu,
        .sync_single_for_device = or1k_sync_single_for_device,
};
EXPORT_SYMBOL(or1k_dma_map_ops);
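
/*
 * Illustrative note (not part of the original file): the generic DMA
 * API reaches the table above through the architecture's get_dma_ops()
 * hook in asm/dma-mapping.h, which on OpenRISC of this vintage simply
 * returns this one table for every device, roughly as sketched below
 * (the function name here is a hypothetical stand-in).
 */
static inline struct dma_map_ops *example_get_dma_ops(struct device *dev)
{
        return &or1k_dma_map_ops;
}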
/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

        return 0;
}

fs_initcall(dma_init);