mm.c

#include <linux/cpu.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/page.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>
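
/*
 * Allocate pages for the swiotlb bounce buffer: if any memblock region
 * starts below the 32-bit boundary, request ZONE_DMA memory so the
 * buffer stays addressable by devices with narrow DMA masks.
 */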
unsigned long xen_get_swiotlb_free_pages(unsigned int order)
{
	struct memblock_region *reg;
	gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM;

	for_each_memblock(memory, reg) {
		if (reg->base < (phys_addr_t)0xffffffff) {
			flags |= __GFP_DMA;
			break;
		}
	}
	return __get_free_pages(flags, order);
}

enum dma_cache_op {
	DMA_UNMAP,
	DMA_MAP,
};
static bool hypercall_cflush = false;

/* functions called by SWIOTLB */
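/*
 * Walk the buffer in XEN_PAGE_SIZE chunks (a highmem or foreign buffer
 * is only guaranteed to be contiguous within a single Xen page) and
 * issue one GNTTABOP_cache_flush hypercall per chunk: clean the cache
 * before handing data to the device, invalidate when taking it back.
 */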
static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
	size_t size, enum dma_data_direction dir, enum dma_cache_op op)
{
	struct gnttab_cache_flush cflush;
	unsigned long xen_pfn;
	size_t left = size;

	xen_pfn = (handle >> XEN_PAGE_SHIFT) + offset / XEN_PAGE_SIZE;
	offset %= XEN_PAGE_SIZE;

	do {
		size_t len = left;

		/* buffers in highmem or foreign pages cannot cross page
		 * boundaries */
		if (len + offset > XEN_PAGE_SIZE)
			len = XEN_PAGE_SIZE - offset;

		cflush.op = 0;
		cflush.a.dev_bus_addr = xen_pfn << XEN_PAGE_SHIFT;
		cflush.offset = offset;
		cflush.length = len;

		if (op == DMA_UNMAP && dir != DMA_TO_DEVICE)
			cflush.op = GNTTAB_CACHE_INVAL;
		if (op == DMA_MAP) {
			if (dir == DMA_FROM_DEVICE)
				cflush.op = GNTTAB_CACHE_INVAL;
			else
				cflush.op = GNTTAB_CACHE_CLEAN;
		}
		if (cflush.op)
			HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);

		offset = 0;
		xen_pfn++;
		left -= len;
	} while (left);
}
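
/*
 * Split the bus address into a page-aligned base and an in-page offset
 * before delegating the actual maintenance to dma_cache_maint().
 */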
static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
}

static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
}
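
/*
 * Called on page map: coherent devices need no cache maintenance, and
 * DMA_ATTR_SKIP_CPU_SYNC lets callers opt out; everyone else gets a
 * cpu-to-dev flush before the device sees the buffer.
 */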
void __xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
	     enum dma_data_direction dir, unsigned long attrs)
{
	if (is_device_dma_coherent(hwdev))
		return;
	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return;

	__xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
}
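
/* Called on page unmap: the dev-to-cpu counterpart of the above. */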
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	if (is_device_dma_coherent(hwdev))
		return;
	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return;

	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}
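
/*
 * The sync_single helpers mirror map/unmap, but without the
 * DMA_ATTR_SKIP_CPU_SYNC opt-out.
 */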
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (is_device_dma_coherent(hwdev))
		return;
	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (is_device_dma_coherent(hwdev))
		return;
	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
}

bool xen_arch_need_swiotlb(struct device *dev,
			   phys_addr_t phys,
			   dma_addr_t dev_addr)
{
	unsigned int xen_pfn = XEN_PFN_DOWN(phys);
	unsigned int bfn = XEN_PFN_DOWN(dev_addr);

	/*
	 * The swiotlb buffer should be used if
	 *	- Xen doesn't have the cache flush hypercall
	 *	- The Linux page refers to foreign memory
	 *	- The device doesn't support coherent DMA requests
	 *
	 * The Linux page may span multiple Xen pages, although it's not
	 * possible to have a mix of local and foreign Xen pages.
	 * Furthermore, range_straddles_page_boundary() already checks
	 * if the buffer is physically contiguous in the host RAM.
	 *
	 * Therefore we only need to check the first Xen page to know if we
	 * require a bounce buffer because the device doesn't support coherent
	 * memory and we are not able to flush the cache.
	 */
	return (!hypercall_cflush && (xen_pfn != bfn) &&
		!is_device_dma_coherent(dev));
}
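
/*
 * On Arm, dom0 is mapped 1:1, so physical addresses can be used
 * directly as DMA (bus) addresses and no page exchange with the
 * hypervisor is needed to make a region machine-contiguous.
 */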
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
				 unsigned int address_bits,
				 dma_addr_t *dma_handle)
{
	if (!xen_initial_domain())
		return -EINVAL;

	/* we assume that dom0 is mapped 1:1 for now */
	*dma_handle = pstart;
	return 0;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);

void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
	return;
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);

const struct dma_map_ops *xen_dma_ops;
EXPORT_SYMBOL(xen_dma_ops);
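
/*
 * Probe for the cache flush hypercall with a zero-length no-op flush:
 * any return other than -ENOSYS means GNTTABOP_cache_flush is
 * implemented, so non-coherent DMA to foreign pages can be handled
 * without bouncing everything through swiotlb.
 */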
int __init xen_mm_init(void)
{
	struct gnttab_cache_flush cflush;

	if (!xen_initial_domain())
		return 0;
	xen_swiotlb_init(1, false);
	xen_dma_ops = &xen_swiotlb_dma_ops;

	cflush.op = 0;
	cflush.a.dev_bus_addr = 0;
	cflush.offset = 0;
	cflush.length = 0;
	if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
		hypercall_cflush = true;
	return 0;
}
arch_initcall(xen_mm_init);