/*
 * arch/arm/xen/mm.c — Xen DMA cache maintenance and swiotlb setup for ARM.
 */
  1. #include <linux/cpu.h>
  2. #include <linux/dma-mapping.h>
  3. #include <linux/bootmem.h>
  4. #include <linux/gfp.h>
  5. #include <linux/highmem.h>
  6. #include <linux/export.h>
  7. #include <linux/of_address.h>
  8. #include <linux/slab.h>
  9. #include <linux/types.h>
  10. #include <linux/dma-mapping.h>
  11. #include <linux/vmalloc.h>
  12. #include <linux/swiotlb.h>
  13. #include <xen/xen.h>
  14. #include <xen/interface/grant_table.h>
  15. #include <xen/interface/memory.h>
  16. #include <xen/swiotlb-xen.h>
  17. #include <asm/cacheflush.h>
  18. #include <asm/xen/page.h>
  19. #include <asm/xen/hypercall.h>
  20. #include <asm/xen/interface.h>
/* Kind of cache maintenance requested from dma_cache_maint(). */
enum dma_cache_op {
	DMA_UNMAP,	/* after DMA: invalidate unless direction was TO_DEVICE */
	DMA_MAP,	/* before DMA: invalidate for FROM_DEVICE, clean otherwise */
};
/* Set true in xen_mm_init() once GNTTABOP_cache_flush is known to exist. */
static bool hypercall_cflush = false;
  26. /* functions called by SWIOTLB */
  27. static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
  28. size_t size, enum dma_data_direction dir, enum dma_cache_op op)
  29. {
  30. struct gnttab_cache_flush cflush;
  31. unsigned long pfn;
  32. size_t left = size;
  33. pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
  34. offset %= PAGE_SIZE;
  35. do {
  36. size_t len = left;
  37. /* buffers in highmem or foreign pages cannot cross page
  38. * boundaries */
  39. if (len + offset > PAGE_SIZE)
  40. len = PAGE_SIZE - offset;
  41. cflush.op = 0;
  42. cflush.a.dev_bus_addr = pfn << PAGE_SHIFT;
  43. cflush.offset = offset;
  44. cflush.length = len;
  45. if (op == DMA_UNMAP && dir != DMA_TO_DEVICE)
  46. cflush.op = GNTTAB_CACHE_INVAL;
  47. if (op == DMA_MAP) {
  48. if (dir == DMA_FROM_DEVICE)
  49. cflush.op = GNTTAB_CACHE_INVAL;
  50. else
  51. cflush.op = GNTTAB_CACHE_CLEAN;
  52. }
  53. if (cflush.op)
  54. HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
  55. offset = 0;
  56. pfn++;
  57. left -= len;
  58. } while (left);
  59. }
  60. static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
  61. size_t size, enum dma_data_direction dir)
  62. {
  63. dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
  64. }
  65. static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
  66. size_t size, enum dma_data_direction dir)
  67. {
  68. dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
  69. }
  70. void __xen_dma_map_page(struct device *hwdev, struct page *page,
  71. dma_addr_t dev_addr, unsigned long offset, size_t size,
  72. enum dma_data_direction dir, struct dma_attrs *attrs)
  73. {
  74. if (is_device_dma_coherent(hwdev))
  75. return;
  76. if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
  77. return;
  78. __xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
  79. }
  80. void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
  81. size_t size, enum dma_data_direction dir,
  82. struct dma_attrs *attrs)
  83. {
  84. if (is_device_dma_coherent(hwdev))
  85. return;
  86. if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
  87. return;
  88. __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
  89. }
  90. void __xen_dma_sync_single_for_cpu(struct device *hwdev,
  91. dma_addr_t handle, size_t size, enum dma_data_direction dir)
  92. {
  93. if (is_device_dma_coherent(hwdev))
  94. return;
  95. __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
  96. }
  97. void __xen_dma_sync_single_for_device(struct device *hwdev,
  98. dma_addr_t handle, size_t size, enum dma_data_direction dir)
  99. {
  100. if (is_device_dma_coherent(hwdev))
  101. return;
  102. __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
  103. }
  104. bool xen_arch_need_swiotlb(struct device *dev,
  105. unsigned long pfn,
  106. unsigned long mfn)
  107. {
  108. return (!hypercall_cflush && (pfn != mfn) && !is_device_dma_coherent(dev));
  109. }
/*
 * "Create" a physically contiguous region for DMA.
 * Only dom0 is supported; since dom0 is assumed to be mapped 1:1, the
 * physical start address is already a valid bus address, so no exchange
 * with the hypervisor is required.  Returns -EINVAL for non-dom0 callers.
 * NOTE(review): order and address_bits are deliberately ignored under the
 * 1:1 assumption.
 */
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
				 unsigned int address_bits,
				 dma_addr_t *dma_handle)
{
	if (!xen_initial_domain())
		return -EINVAL;

	/* we assume that dom0 is mapped 1:1 for now */
	*dma_handle = pstart;
	return 0;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
  121. void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
  122. {
  123. return;
  124. }
  125. EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
/* DMA ops installed for Xen; points at xen_swiotlb_dma_ops after xen_mm_init(). */
struct dma_map_ops *xen_dma_ops;
EXPORT_SYMBOL_GPL(xen_dma_ops);
/* swiotlb-xen backed implementation of the DMA mapping operations. */
static struct dma_map_ops xen_swiotlb_dma_ops = {
	.mapping_error = xen_swiotlb_dma_mapping_error,
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg_attrs,
	.unmap_sg = xen_swiotlb_unmap_sg_attrs,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.set_dma_mask = xen_swiotlb_set_dma_mask,
};
/*
 * Boot-time setup for dom0: initialise swiotlb-xen, install the Xen DMA
 * ops, and probe whether the hypervisor implements GNTTABOP_cache_flush
 * (consumed by dma_cache_maint() / xen_arch_need_swiotlb()).
 */
int __init xen_mm_init(void)
{
	struct gnttab_cache_flush cflush;

	/* Only the initial domain uses these DMA ops. */
	if (!xen_initial_domain())
		return 0;
	xen_swiotlb_init(1, false);
	xen_dma_ops = &xen_swiotlb_dma_ops;

	/*
	 * Issue a zero-length, op==0 flush purely to detect support:
	 * -ENOSYS means the hypercall is unimplemented.
	 */
	cflush.op = 0;
	cflush.a.dev_bus_addr = 0;
	cflush.offset = 0;
	cflush.length = 0;
	if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
		hypercall_cflush = true;
	return 0;
}
arch_initcall(xen_mm_init);