|
@@ -20,9 +20,46 @@
|
|
|
#include <linux/cache.h>
|
|
|
#include <asm/cacheflush.h>
|
|
|
|
|
|
+static inline void __dma_sync_for_device(void *vaddr, size_t size,
|
|
|
+ enum dma_data_direction direction)
|
|
|
+{
|
|
|
+ switch (direction) {
|
|
|
+ case DMA_FROM_DEVICE:
|
|
|
+ invalidate_dcache_range((unsigned long)vaddr,
|
|
|
+ (unsigned long)(vaddr + size));
|
|
|
+ break;
|
|
|
+ case DMA_TO_DEVICE:
|
|
|
+ /*
|
|
|
+	 * We just need to flush the caches here, but Nios2 flush
|
|
|
+ * instruction will do both writeback and invalidate.
|
|
|
+ */
|
|
|
+ case DMA_BIDIRECTIONAL: /* flush and invalidate */
|
|
|
+ flush_dcache_range((unsigned long)vaddr,
|
|
|
+ (unsigned long)(vaddr + size));
|
|
|
+ break;
|
|
|
+ default:
|
|
|
+ BUG();
|
|
|
+ }
|
|
|
+}
|
|
|
|
|
|
-void *dma_alloc_coherent(struct device *dev, size_t size,
|
|
|
- dma_addr_t *dma_handle, gfp_t gfp)
|
|
|
+static inline void __dma_sync_for_cpu(void *vaddr, size_t size,
|
|
|
+ enum dma_data_direction direction)
|
|
|
+{
|
|
|
+ switch (direction) {
|
|
|
+ case DMA_BIDIRECTIONAL:
|
|
|
+ case DMA_FROM_DEVICE:
|
|
|
+ invalidate_dcache_range((unsigned long)vaddr,
|
|
|
+ (unsigned long)(vaddr + size));
|
|
|
+ break;
|
|
|
+ case DMA_TO_DEVICE:
|
|
|
+ break;
|
|
|
+ default:
|
|
|
+ BUG();
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
+static void *nios2_dma_alloc(struct device *dev, size_t size,
|
|
|
+ dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
|
|
|
{
|
|
|
void *ret;
|
|
|
|
|
@@ -45,24 +82,21 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
|
|
|
|
|
|
return ret;
|
|
|
}
|
|
|
-EXPORT_SYMBOL(dma_alloc_coherent);
|
|
|
|
|
|
-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
|
|
|
- dma_addr_t dma_handle)
|
|
|
+static void nios2_dma_free(struct device *dev, size_t size, void *vaddr,
|
|
|
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
|
|
|
{
|
|
|
unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr);
|
|
|
|
|
|
free_pages(addr, get_order(size));
|
|
|
}
|
|
|
-EXPORT_SYMBOL(dma_free_coherent);
|
|
|
|
|
|
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
|
|
|
- enum dma_data_direction direction)
|
|
|
+static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg,
|
|
|
+ int nents, enum dma_data_direction direction,
|
|
|
+ struct dma_attrs *attrs)
|
|
|
{
|
|
|
int i;
|
|
|
|
|
|
- BUG_ON(!valid_dma_direction(direction));
|
|
|
-
|
|
|
for_each_sg(sg, sg, nents, i) {
|
|
|
void *addr;
|
|
|
|
|
@@ -75,40 +109,32 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
|
|
|
|
|
|
return nents;
|
|
|
}
|
|
|
-EXPORT_SYMBOL(dma_map_sg);
|
|
|
|
|
|
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
|
|
|
+static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page,
|
|
|
unsigned long offset, size_t size,
|
|
|
- enum dma_data_direction direction)
|
|
|
+ enum dma_data_direction direction,
|
|
|
+ struct dma_attrs *attrs)
|
|
|
{
|
|
|
- void *addr;
|
|
|
-
|
|
|
- BUG_ON(!valid_dma_direction(direction));
|
|
|
+ void *addr = page_address(page) + offset;
|
|
|
|
|
|
- addr = page_address(page) + offset;
|
|
|
__dma_sync_for_device(addr, size, direction);
|
|
|
-
|
|
|
return page_to_phys(page) + offset;
|
|
|
}
|
|
|
-EXPORT_SYMBOL(dma_map_page);
|
|
|
|
|
|
-void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
|
|
|
- enum dma_data_direction direction)
|
|
|
+static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
|
|
|
+ size_t size, enum dma_data_direction direction,
|
|
|
+ struct dma_attrs *attrs)
|
|
|
{
|
|
|
- BUG_ON(!valid_dma_direction(direction));
|
|
|
-
|
|
|
__dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
|
|
|
}
|
|
|
-EXPORT_SYMBOL(dma_unmap_page);
|
|
|
|
|
|
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
|
|
|
- enum dma_data_direction direction)
|
|
|
+static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
|
|
|
+ int nhwentries, enum dma_data_direction direction,
|
|
|
+ struct dma_attrs *attrs)
|
|
|
{
|
|
|
void *addr;
|
|
|
int i;
|
|
|
|
|
|
- BUG_ON(!valid_dma_direction(direction));
|
|
|
-
|
|
|
if (direction == DMA_TO_DEVICE)
|
|
|
return;
|
|
|
|
|
@@ -118,69 +144,54 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
|
|
|
__dma_sync_for_cpu(addr, sg->length, direction);
|
|
|
}
|
|
|
}
|
|
|
-EXPORT_SYMBOL(dma_unmap_sg);
|
|
|
-
|
|
|
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
|
|
|
- size_t size, enum dma_data_direction direction)
|
|
|
-{
|
|
|
- BUG_ON(!valid_dma_direction(direction));
|
|
|
|
|
|
- __dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
|
|
|
-}
|
|
|
-EXPORT_SYMBOL(dma_sync_single_for_cpu);
|
|
|
-
|
|
|
-void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
|
|
|
- size_t size, enum dma_data_direction direction)
|
|
|
-{
|
|
|
- BUG_ON(!valid_dma_direction(direction));
|
|
|
-
|
|
|
- __dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
|
|
|
-}
|
|
|
-EXPORT_SYMBOL(dma_sync_single_for_device);
|
|
|
-
|
|
|
-void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
|
|
|
- unsigned long offset, size_t size,
|
|
|
- enum dma_data_direction direction)
|
|
|
+static void nios2_dma_sync_single_for_cpu(struct device *dev,
|
|
|
+ dma_addr_t dma_handle, size_t size,
|
|
|
+ enum dma_data_direction direction)
|
|
|
{
|
|
|
- BUG_ON(!valid_dma_direction(direction));
|
|
|
-
|
|
|
__dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
|
|
|
}
|
|
|
-EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
|
|
|
|
|
|
-void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
|
|
|
- unsigned long offset, size_t size,
|
|
|
- enum dma_data_direction direction)
|
|
|
+static void nios2_dma_sync_single_for_device(struct device *dev,
|
|
|
+ dma_addr_t dma_handle, size_t size,
|
|
|
+ enum dma_data_direction direction)
|
|
|
{
|
|
|
- BUG_ON(!valid_dma_direction(direction));
|
|
|
-
|
|
|
__dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
|
|
|
}
|
|
|
-EXPORT_SYMBOL(dma_sync_single_range_for_device);
|
|
|
|
|
|
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
|
|
|
- enum dma_data_direction direction)
|
|
|
+static void nios2_dma_sync_sg_for_cpu(struct device *dev,
|
|
|
+ struct scatterlist *sg, int nelems,
|
|
|
+ enum dma_data_direction direction)
|
|
|
{
|
|
|
int i;
|
|
|
|
|
|
- BUG_ON(!valid_dma_direction(direction));
|
|
|
-
|
|
|
/* Make sure that gcc doesn't leave the empty loop body. */
|
|
|
for_each_sg(sg, sg, nelems, i)
|
|
|
__dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
|
|
|
}
|
|
|
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
|
|
|
|
|
|
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
|
|
|
- int nelems, enum dma_data_direction direction)
|
|
|
+static void nios2_dma_sync_sg_for_device(struct device *dev,
|
|
|
+ struct scatterlist *sg, int nelems,
|
|
|
+ enum dma_data_direction direction)
|
|
|
{
|
|
|
int i;
|
|
|
|
|
|
- BUG_ON(!valid_dma_direction(direction));
|
|
|
-
|
|
|
/* Make sure that gcc doesn't leave the empty loop body. */
|
|
|
for_each_sg(sg, sg, nelems, i)
|
|
|
__dma_sync_for_device(sg_virt(sg), sg->length, direction);
|
|
|
|
|
|
}
|
|
|
-EXPORT_SYMBOL(dma_sync_sg_for_device);
|
|
|
+
|
|
|
+struct dma_map_ops nios2_dma_ops = {
|
|
|
+ .alloc = nios2_dma_alloc,
|
|
|
+ .free = nios2_dma_free,
|
|
|
+ .map_page = nios2_dma_map_page,
|
|
|
+ .unmap_page = nios2_dma_unmap_page,
|
|
|
+ .map_sg = nios2_dma_map_sg,
|
|
|
+ .unmap_sg = nios2_dma_unmap_sg,
|
|
|
+ .sync_single_for_device = nios2_dma_sync_single_for_device,
|
|
|
+ .sync_single_for_cpu = nios2_dma_sync_single_for_cpu,
|
|
|
+ .sync_sg_for_cpu = nios2_dma_sync_sg_for_cpu,
|
|
|
+ .sync_sg_for_device = nios2_dma_sync_sg_for_device,
|
|
|
+};
|
|
|
+EXPORT_SYMBOL(nios2_dma_ops);
|