@@ -16,26 +16,25 @@
  */
 
 #include <linux/dma-contiguous.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/dma-direct.h>
 #include <linux/gfp.h>
 #include <linux/highmem.h>
 #include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/string.h>
 #include <linux/types.h>
 #include <asm/cacheflush.h>
 #include <asm/io.h>
+#include <asm/platform.h>
 
-static void do_cache_op(dma_addr_t dma_handle, size_t size,
+static void do_cache_op(phys_addr_t paddr, size_t size,
 		void (*fn)(unsigned long, unsigned long))
 {
-	unsigned long off = dma_handle & (PAGE_SIZE - 1);
-	unsigned long pfn = PFN_DOWN(dma_handle);
+	unsigned long off = paddr & (PAGE_SIZE - 1);
+	unsigned long pfn = PFN_DOWN(paddr);
 	struct page *page = pfn_to_page(pfn);
 
 	if (!PageHighMem(page))
-		fn((unsigned long)bus_to_virt(dma_handle), size);
+		fn((unsigned long)phys_to_virt(paddr), size);
 	else
 		while (size > 0) {
 			size_t sz = min_t(size_t, size, PAGE_SIZE - off);
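
do_cache_op() now takes the physical address directly, so the lowmem path can use
phys_to_virt() instead of the deprecated bus_to_virt(). The highmem branch walks the
buffer one page at a time; the loop body falls outside this hunk, but the usual pattern
is a short-lived kmap of each page. A sketch under that assumption (not the exact code
from this file):

	/* Hypothetical per-page walk for highmem pages: kmap_atomic()
	 * provides a temporary kernel mapping for each page in turn. */
	while (size > 0) {
		size_t sz = min_t(size_t, size, PAGE_SIZE - off);
		void *vaddr = kmap_atomic(page);

		fn((unsigned long)vaddr + off, sz);
		kunmap_atomic(vaddr);
		++page;
		size -= sz;
		off = 0;
	}
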
@@ -49,14 +48,13 @@ static void do_cache_op(dma_addr_t dma_handle, size_t size,
 	}
 }
 
-static void xtensa_sync_single_for_cpu(struct device *dev,
-		dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_BIDIRECTIONAL:
 	case DMA_FROM_DEVICE:
-		do_cache_op(dma_handle, size, __invalidate_dcache_range);
+		do_cache_op(paddr, size, __invalidate_dcache_range);
 		break;
 
 	case DMA_NONE:
@@ -68,15 +66,14 @@ static void xtensa_sync_single_for_cpu(struct device *dev,
 	}
 }
 
-static void xtensa_sync_single_for_device(struct device *dev,
-		dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction dir)
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_BIDIRECTIONAL:
 	case DMA_TO_DEVICE:
 		if (XCHAL_DCACHE_IS_WRITEBACK)
-			do_cache_op(dma_handle, size, __flush_dcache_range);
+			do_cache_op(paddr, size, __flush_dcache_range);
 		break;
 
 	case DMA_NONE:
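
These two hooks replace the xtensa-specific sync_single callbacks: with the generic
noncoherent ops, common code owns the dma_map_ops table and calls
arch_sync_dma_for_cpu()/arch_sync_dma_for_device() with a physical address at the right
points, which is why do_cache_op() switched from dma_addr_t to phys_addr_t. A simplified
sketch of how a common map path drives the device hook (illustrative, not the kernel's
exact code):

	static dma_addr_t sketch_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir, unsigned long attrs)
	{
		phys_addr_t paddr = page_to_phys(page) + offset;

		/* cache maintenance happens on the physical address,
		 * then the phys-to-dma translation is applied once */
		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
			arch_sync_dma_for_device(dev, paddr, size, dir);
		return phys_to_dma(dev, paddr);
	}

The direction logic itself is unchanged: FROM_DEVICE transfers invalidate stale cache
lines before the CPU reads, while TO_DEVICE transfers only need a writeback flush, and
only on writeback-cache configurations (XCHAL_DCACHE_IS_WRITEBACK).
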
@@ -88,43 +85,66 @@ static void xtensa_sync_single_for_device(struct device *dev,
 	}
 }
 
-static void xtensa_sync_sg_for_cpu(struct device *dev,
-		struct scatterlist *sg, int nents,
-		enum dma_data_direction dir)
+#ifdef CONFIG_MMU
+bool platform_vaddr_cached(const void *p)
 {
-	struct scatterlist *s;
-	int i;
+	unsigned long addr = (unsigned long)p;
 
-	for_each_sg(sg, s, nents, i) {
-		xtensa_sync_single_for_cpu(dev, sg_dma_address(s),
-				sg_dma_len(s), dir);
-	}
+	return addr >= XCHAL_KSEG_CACHED_VADDR &&
+	       addr - XCHAL_KSEG_CACHED_VADDR < XCHAL_KSEG_SIZE;
 }
 
-static void xtensa_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sg, int nents,
-		enum dma_data_direction dir)
+bool platform_vaddr_uncached(const void *p)
 {
-	struct scatterlist *s;
-	int i;
+	unsigned long addr = (unsigned long)p;
 
-	for_each_sg(sg, s, nents, i) {
-		xtensa_sync_single_for_device(dev, sg_dma_address(s),
-				sg_dma_len(s), dir);
-	}
+	return addr >= XCHAL_KSEG_BYPASS_VADDR &&
+	       addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE;
+}
+
+void *platform_vaddr_to_uncached(void *p)
+{
+	return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
+}
+
+void *platform_vaddr_to_cached(void *p)
+{
+	return p + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
+}
+#else
+bool __attribute__((weak)) platform_vaddr_cached(const void *p)
+{
+	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
+	return true;
+}
+
+bool __attribute__((weak)) platform_vaddr_uncached(const void *p)
+{
+	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
+	return false;
+}
+
+void __attribute__((weak)) *platform_vaddr_to_uncached(void *p)
+{
+	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
+	return p;
+}
+
+void __attribute__((weak)) *platform_vaddr_to_cached(void *p)
+{
+	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
+	return p;
 }
+#endif
 
 /*
  * Note: We assume that the full memory space is always mapped to 'kseg'
  *	 Otherwise we have to use page attributes (not implemented).
  */
 
-static void *xtensa_dma_alloc(struct device *dev, size_t size,
-		dma_addr_t *handle, gfp_t flag,
-		unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+		gfp_t flag, unsigned long attrs)
 {
-	unsigned long ret;
-	unsigned long uncached;
 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	struct page *page = NULL;
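
The platform_vaddr_*() helpers encode the kseg layout: on MMU configurations the same
physical memory is statically mapped twice, once cached and once bypass (uncached), so
converting a pointer between the two views is a constant offset. A worked example with
illustrative window addresses (actual values depend on the core configuration):

	XCHAL_KSEG_CACHED_VADDR = 0xd0000000
	XCHAL_KSEG_BYPASS_VADDR = 0xd8000000

	cached p   = 0xd0001000
	uncached   = p + 0xd8000000 - 0xd0000000 = 0xd8001000

The weak !CONFIG_MMU fallbacks exist so platforms with a nonstandard memory layout can
override them; the WARN_ONCE() flags boards that still rely on the defaults.
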
@@ -147,6 +167,10 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
 
 	*handle = phys_to_dma(dev, page_to_phys(page));
 
+	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+		return page;
+	}
+
 #ifdef CONFIG_MMU
 	if (PageHighMem(page)) {
 		void *p;
@@ -161,27 +185,21 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
 		return p;
 	}
 #endif
-	ret = (unsigned long)page_address(page);
-	BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
-	       ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
-
-	uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
-	__invalidate_dcache_range(ret, size);
-
-	return (void *)uncached;
+	BUG_ON(!platform_vaddr_cached(page_address(page)));
+	__invalidate_dcache_range((unsigned long)page_address(page), size);
+	return platform_vaddr_to_uncached(page_address(page));
 }
 
-static void xtensa_dma_free(struct device *dev, size_t size, void *vaddr,
-		dma_addr_t dma_handle, unsigned long attrs)
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
+		dma_addr_t dma_handle, unsigned long attrs)
 {
 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	unsigned long addr = (unsigned long)vaddr;
 	struct page *page;
 
-	if (addr >= XCHAL_KSEG_BYPASS_VADDR &&
-	    addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE) {
-		addr += XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
-		page = virt_to_page(addr);
+	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+		page = vaddr;
+	} else if (platform_vaddr_uncached(vaddr)) {
+		page = virt_to_page(platform_vaddr_to_cached(vaddr));
 	} else {
 #ifdef CONFIG_MMU
 		dma_common_free_remap(vaddr, size, VM_MAP);
@@ -192,72 +210,3 @@ static void xtensa_dma_free(struct device *dev, size_t size, void *vaddr,
 	if (!dma_release_from_contiguous(dev, page, count))
 		__free_pages(page, get_order(size));
 }
-
-static dma_addr_t xtensa_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size,
-		enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	dma_addr_t dma_handle = page_to_phys(page) + offset;
-
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		xtensa_sync_single_for_device(dev, dma_handle, size, dir);
-
-	return dma_handle;
-}
-
-static void xtensa_unmap_page(struct device *dev, dma_addr_t dma_handle,
-		size_t size, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		xtensa_sync_single_for_cpu(dev, dma_handle, size, dir);
-}
-
-static int xtensa_map_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	struct scatterlist *s;
-	int i;
-
-	for_each_sg(sg, s, nents, i) {
-		s->dma_address = xtensa_map_page(dev, sg_page(s), s->offset,
-				s->length, dir, attrs);
-	}
-	return nents;
-}
-
-static void xtensa_unmap_sg(struct device *dev,
-		struct scatterlist *sg, int nents,
-		enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	struct scatterlist *s;
-	int i;
-
-	for_each_sg(sg, s, nents, i) {
-		xtensa_unmap_page(dev, sg_dma_address(s),
-				sg_dma_len(s), dir, attrs);
-	}
-}
-
-int xtensa_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return 0;
-}
-
-const struct dma_map_ops xtensa_dma_map_ops = {
-	.alloc = xtensa_dma_alloc,
-	.free = xtensa_dma_free,
-	.map_page = xtensa_map_page,
-	.unmap_page = xtensa_unmap_page,
-	.map_sg = xtensa_map_sg,
-	.unmap_sg = xtensa_unmap_sg,
-	.sync_single_for_cpu = xtensa_sync_single_for_cpu,
-	.sync_single_for_device = xtensa_sync_single_for_device,
-	.sync_sg_for_cpu = xtensa_sync_sg_for_cpu,
-	.sync_sg_for_device = xtensa_sync_sg_for_device,
-	.mapping_error = xtensa_dma_mapping_error,
-};
-EXPORT_SYMBOL(xtensa_dma_map_ops);
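
Everything deleted above (map_page/unmap_page, the scatterlist walkers, the always-zero
mapping_error, and the dma_map_ops table itself) is now provided by the generic
dma_noncoherent code, which is the point of the conversion. The one behavioural addition
is DMA_ATTR_NO_KERNEL_MAPPING support: arch_dma_alloc() returns the struct page pointer
as an opaque cookie instead of an uncached virtual address, and arch_dma_free() expects
the same cookie back. A driver-side sketch of that contract (illustrative; example_alloc
is a made-up name):

	static int example_alloc(struct device *dev, size_t size)
	{
		dma_addr_t dma_handle;
		void *cookie;

		cookie = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
					 DMA_ATTR_NO_KERNEL_MAPPING);
		if (!cookie)
			return -ENOMEM;

		/* the device uses dma_handle; the CPU must not dereference
		 * cookie, since no kernel mapping was created for it */

		dma_free_attrs(dev, size, cookie, dma_handle,
			       DMA_ATTR_NO_KERNEL_MAPPING);
		return 0;
	}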