@@ -16,6 +16,8 @@
 #include <linux/bootmem.h>
 #include <linux/spinlock.h>
 #include <linux/gfp.h>
+#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
 #include <asm/mipsregs.h>
 #include <asm/jazz.h>
 #include <asm/io.h>
@@ -86,6 +88,7 @@ static int __init vdma_init(void)
 	printk(KERN_INFO "VDMA: R4030 DMA pagetables initialized.\n");
 	return 0;
 }
+arch_initcall(vdma_init);
 
 /*
  * Allocate DMA pagetables using a simple first-fit algorithm
@@ -556,4 +559,140 @@ int vdma_get_enable(int channel)
 	return enable;
 }
 
-arch_initcall(vdma_init);
+static void *jazz_dma_alloc(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+{
+	void *ret;
+
+	ret = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
+	if (!ret)
+		return NULL;
+
+	*dma_handle = vdma_alloc(virt_to_phys(ret), size);
+	if (*dma_handle == VDMA_ERROR) {
+		dma_direct_free(dev, size, ret, *dma_handle, attrs);
+		return NULL;
+	}
+
+	if (!(attrs & DMA_ATTR_NON_CONSISTENT)) {
+		dma_cache_wback_inv((unsigned long)ret, size);
+		ret = UNCAC_ADDR(ret);
+	}
+	return ret;
+}
+
+static void jazz_dma_free(struct device *dev, size_t size, void *vaddr,
+		dma_addr_t dma_handle, unsigned long attrs)
+{
+	vdma_free(dma_handle);
+	if (!(attrs & DMA_ATTR_NON_CONSISTENT))
+		vaddr = (void *)CAC_ADDR((unsigned long)vaddr);
+	return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+}
+
+static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+	phys_addr_t phys = page_to_phys(page) + offset;
+
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		arch_sync_dma_for_device(dev, phys, size, dir);
+	return vdma_alloc(phys, size);
+}
+
+static void jazz_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		arch_sync_dma_for_cpu(dev, vdma_log2phys(dma_addr), size, dir);
+	vdma_free(dma_addr);
+}
+
+static int jazz_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+	int i;
+	struct scatterlist *sg;
+
+	for_each_sg(sglist, sg, nents, i) {
+		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+			arch_sync_dma_for_device(dev, sg_phys(sg), sg->length,
+				dir);
+		sg->dma_address = vdma_alloc(sg_phys(sg), sg->length);
+		if (sg->dma_address == VDMA_ERROR)
+			return 0;
+		sg_dma_len(sg) = sg->length;
+	}
+
+	return nents;
+}
+
+static void jazz_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+	int i;
+	struct scatterlist *sg;
+
+	for_each_sg(sglist, sg, nents, i) {
+		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+			arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length,
+				dir);
+		vdma_free(sg->dma_address);
+	}
+}
+
+static void jazz_dma_sync_single_for_device(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+	arch_sync_dma_for_device(dev, vdma_log2phys(addr), size, dir);
+}
+
+static void jazz_dma_sync_single_for_cpu(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+	arch_sync_dma_for_cpu(dev, vdma_log2phys(addr), size, dir);
+}
+
+static void jazz_dma_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgl, sg, nents, i)
+		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+}
+
+static void jazz_dma_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgl, sg, nents, i)
+		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+}
+
+static int jazz_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return dma_addr == VDMA_ERROR;
+}
+
+const struct dma_map_ops jazz_dma_ops = {
+	.alloc			= jazz_dma_alloc,
+	.free			= jazz_dma_free,
+	.mmap			= arch_dma_mmap,
+	.map_page		= jazz_dma_map_page,
+	.unmap_page		= jazz_dma_unmap_page,
+	.map_sg			= jazz_dma_map_sg,
+	.unmap_sg		= jazz_dma_unmap_sg,
+	.sync_single_for_cpu	= jazz_dma_sync_single_for_cpu,
+	.sync_single_for_device	= jazz_dma_sync_single_for_device,
+	.sync_sg_for_cpu	= jazz_dma_sync_sg_for_cpu,
+	.sync_sg_for_device	= jazz_dma_sync_sg_for_device,
+	.dma_supported		= dma_direct_supported,
+	.cache_sync		= arch_dma_cache_sync,
+	.mapping_error		= jazz_dma_mapping_error,
+};
+EXPORT_SYMBOL(jazz_dma_ops);
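
For context, not part of the patch itself: jazz_dma_ops is presumably wired up
elsewhere in the series through the platform's dma_map_ops hook, so drivers on
Jazz keep using the ordinary DMA API and never call the jazz_* helpers
directly. Below is a minimal sketch of how a driver would exercise these ops;
"dev" is assumed to be the struct device of an on-board Jazz peripheral, and
jazz_dma_example() is a made-up name used only for illustration.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static int jazz_dma_example(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;
	void *cpu_addr;

	/*
	 * Coherent allocation: dispatches to jazz_dma_alloc(), which backs
	 * the buffer with dma_direct_alloc() and then maps it through the
	 * R4030 VDMA pagetables, so "handle" is a VDMA logical address,
	 * not a physical one.
	 */
	cpu_addr = dma_alloc_coherent(dev, len, &handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;
	/* ... program the device with "handle", do I/O ... */
	dma_free_coherent(dev, len, cpu_addr, handle);

	/*
	 * Streaming mapping: dispatches to jazz_dma_map_page(), which writes
	 * back/invalidates the CPU cache for "buf" and allocates VDMA
	 * pagetable entries covering it.
	 */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))	/* catches VDMA_ERROR */
		return -ENOMEM;
	/* ... start the transfer ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}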