@@ -1,6 +1,9 @@
+#include <linux/cpu.h>
 #include <linux/bootmem.h>
 #include <linux/gfp.h>
+#include <linux/highmem.h>
 #include <linux/export.h>
+#include <linux/of_address.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/dma-mapping.h>
@@ -16,6 +19,108 @@
 #include <asm/xen/hypercall.h>
 #include <asm/xen/interface.h>
 
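+/*
+ * DMA_MAP marks maintenance done when a buffer is handed to the
+ * device, DMA_UNMAP when it is taken back; the TODO below is expected
+ * to combine this with the DMA direction to pick clean vs invalidate.
+ */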
+enum dma_cache_op {
+	DMA_UNMAP,
+	DMA_MAP,
+};
+
+/* functions called by SWIOTLB */
+
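+/*
+ * 'handle' is a bus address, so the pfn is derived directly from it
+ * rather than from a struct page; the buffer is walked one page at a
+ * time, restarting at offset 0 after the first page.
+ */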
+static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
+	size_t size, enum dma_data_direction dir, enum dma_cache_op op)
+{
+	unsigned long pfn;
+	size_t left = size;
+
+	pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
+	offset %= PAGE_SIZE;
+
+	do {
+		size_t len = left;
+
+		/* buffers in highmem or foreign pages cannot cross page
+		 * boundaries */
+		if (len + offset > PAGE_SIZE)
+			len = PAGE_SIZE - offset;
+
+		/* TODO: cache flush */
+
+		offset = 0;
+		pfn++;
+		left -= len;
+	} while (left);
+}
+
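+/* Pair op with direction of travel: dev_to_cpu is the unmap side,
+ * cpu_to_dev the map side. */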
+static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
+{
+	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
+}
+
+static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
+{
+	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
+}
+
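+/*
+ * Hooks called by swiotlb-xen. All maintenance is skipped for
+ * cache-coherent devices; map/unmap additionally honour
+ * DMA_ATTR_SKIP_CPU_SYNC.
+ */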
+void __xen_dma_map_page(struct device *hwdev, struct page *page,
+	dma_addr_t dev_addr, unsigned long offset, size_t size,
+	enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	if (is_device_dma_coherent(hwdev))
+		return;
+	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+		return;
+
+	__xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
+}
+
+void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+	if (is_device_dma_coherent(hwdev))
+		return;
+	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+		return;
+
+	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
+}
+
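+/* The sync hooks take no dma_attrs, so only device coherency is
+ * checked before cache maintenance. */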
+void __xen_dma_sync_single_for_cpu(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	if (is_device_dma_coherent(hwdev))
+		return;
+	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
+}
+
+void __xen_dma_sync_single_for_device(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	if (is_device_dma_coherent(hwdev))
+		return;
+	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
+}
+
 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
				 unsigned int address_bits,
				 dma_addr_t *dma_handle)