/*
 * arch/sh/mm/consistent.c
 *
 * Copyright (C) 2004 - 2007  Paul Mundt
 *
 * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>
#include <asm/io.h>

struct dma_coherent_mem {
	void		*virt_base;
	u32		device_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
};

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret, *ret_nocache;
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	if (mem) {
		int page = bitmap_find_free_region(mem->bitmap, mem->size,
						   order);
		if (page >= 0) {
			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
			ret = mem->virt_base + (page << PAGE_SHIFT);
			memset(ret, 0, size);
			return ret;
		}
		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
			return NULL;
	}

	ret = (void *)__get_free_pages(gfp, order);
	if (!ret)
		return NULL;

	memset(ret, 0, size);
	/*
	 * Pages from the page allocator may have data present in
	 * cache. So flush the cache before using uncached memory.
	 */
	dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);

	ret_nocache = ioremap_nocache(virt_to_phys(ret), size);
	if (!ret_nocache) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}

	*dma_handle = virt_to_phys(ret);
	return ret_nocache;
}
EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	if (mem && vaddr >= mem->virt_base &&
	    vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
	} else {
		WARN_ON(irqs_disabled());	/* for portability */
		BUG_ON(mem && mem->flags & DMA_MEMORY_EXCLUSIVE);
		free_pages((unsigned long)phys_to_virt(dma_handle), order);
		iounmap(vaddr);
	}
}
EXPORT_SYMBOL(dma_free_coherent);
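
/*
 * Illustrative sketch (not part of the original file): a driver would
 * typically pair the two calls above like this. "MY_BUF_SIZE" and the
 * platform_device pointer "pdev" are hypothetical names.
 *
 *	dma_addr_t handle;
 *	void *buf;
 *
 *	buf = dma_alloc_coherent(&pdev->dev, MY_BUF_SIZE, &handle,
 *				 GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *
 *	(program the device with "handle"; the CPU accesses "buf")
 *
 *	dma_free_coherent(&pdev->dev, MY_BUF_SIZE, buf, handle);
 */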

int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	mem_base = ioremap_nocache(bus_addr, size);
	if (!mem_base)
		goto out;

	dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;

	dev->dma_mem->virt_base = mem_base;
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	return DMA_MEMORY_IO;

 free1_out:
	kfree(dev->dma_mem);
	dev->dma_mem = NULL;	/* don't leave a dangling pointer behind */
 out:
	if (mem_base)
		iounmap(mem_base);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
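
/*
 * Illustrative sketch (not part of the original file): a board file might
 * hand a chunk of on-chip SRAM to one device for exclusive coherent
 * allocations. "SRAM_PHYS" and "SRAM_SIZE" are hypothetical constants.
 *
 *	if (!dma_declare_coherent_memory(&pdev->dev, SRAM_PHYS, SRAM_PHYS,
 *					 SRAM_SIZE,
 *					 DMA_MEMORY_MAP |
 *					 DMA_MEMORY_EXCLUSIVE))
 *		return -ENXIO;
 *
 * dma_alloc_coherent() on this device then carves buffers out of the
 * declared region, and dma_release_declared_memory() undoes the setup.
 */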

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dev->dma_mem = NULL;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >>
		    PAGE_SHIFT;
	int pos, err;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
	if (err != 0)
		return ERR_PTR(err);

	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
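
/*
 * Illustrative sketch (not part of the original file): reserving a
 * firmware-owned page inside a previously declared region so that
 * dma_alloc_coherent() never hands it out. "FW_MAILBOX_ADDR" is a
 * hypothetical device-bus address inside the declared range.
 *
 *	void *mbox;
 *
 *	mbox = dma_mark_declared_memory_occupied(&pdev->dev,
 *						 FW_MAILBOX_ADDR, PAGE_SIZE);
 *	if (IS_ERR(mbox))
 *		return PTR_ERR(mbox);
 */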

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
#ifdef CONFIG_CPU_SH5
	void *p1addr = vaddr;
#else
	void *p1addr = (void *)P1SEGADDR((unsigned long)vaddr);
#endif

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		__flush_invalidate_region(p1addr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		__flush_wback_region(p1addr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		__flush_purge_region(p1addr, size);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(dma_cache_sync);
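
/*
 * Illustrative sketch (not part of the original file): before handing a
 * kmalloc()'d buffer to the device, write back any dirty cache lines;
 * after the device has written into it, invalidate before the CPU reads.
 * "dev", "buf" and "len" are hypothetical.
 *
 *	(CPU filled buf; device is about to read it)
 *	dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
 *
 *	(device wrote into buf; CPU is about to read it)
 *	dma_cache_sync(dev, buf, len, DMA_FROM_DEVICE);
 */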

int platform_resource_setup_memory(struct platform_device *pdev,
				   char *name, unsigned long memsize)
{
	struct resource *r;
	dma_addr_t dma_handle;
	void *buf;

	r = pdev->resource + pdev->num_resources - 1;
	if (r->flags) {
		pr_warning("%s: unable to find empty space for resource\n",
			   name);
		return -EINVAL;
	}

	buf = dma_alloc_coherent(NULL, memsize, &dma_handle, GFP_KERNEL);
	if (!buf) {
		pr_warning("%s: unable to allocate memory\n", name);
		return -ENOMEM;
	}

	memset(buf, 0, memsize);

	r->flags = IORESOURCE_MEM;
	r->start = dma_handle;
	r->end = r->start + memsize - 1;
	r->name = name;
	return 0;
}
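
/*
 * Illustrative sketch (not part of the original file): board setup code
 * calls the helper above with the device's last resource slot left empty
 * (flags == 0), and the helper fills it in with the freshly allocated
 * buffer. The resource array, device, and size below are hypothetical.
 *
 *	static struct resource mydev_resources[] = {
 *		(real MMIO/IRQ resources first)
 *		{ },	(left empty; filled in by the helper)
 *	};
 *
 *	platform_resource_setup_memory(&mydev_device, "mydev", SZ_1M);
 */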