Browse Source

Merge tag 'dma-mapping-4.15' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

 - turn dma_cache_sync into a dma_map_ops instance and remove
   implementations that are purely dead because the architecture doesn't
   support noncoherent allocations

 - add a flag for busses that need DMA configuration (Robin Murphy)

* tag 'dma-mapping-4.15' of git://git.infradead.org/users/hch/dma-mapping:
  dma-mapping: turn dma_cache_sync into a dma_map_ops method
  sh: make dma_cache_sync a no-op
  xtensa: make dma_cache_sync a no-op
  unicore32: make dma_cache_sync a no-op
  powerpc: make dma_cache_sync a no-op
  mn10300: make dma_cache_sync a no-op
  microblaze: make dma_cache_sync a no-op
  ia64: make dma_cache_sync a no-op
  frv: make dma_cache_sync a no-op
  x86: make dma_cache_sync a no-op
  floppy: consolidate the dummy fd_cacheflush definition
  drivers: flag buses which demand DMA configuration
Linus Torvalds 7 years ago
parent
commit
e37e0ee019
42 changed files with 71 additions and 250 deletions
  1. 0 2
      arch/alpha/include/asm/dma-mapping.h
  2. 0 2
      arch/alpha/include/asm/floppy.h
  3. 0 6
      arch/cris/include/asm/dma-mapping.h
  4. 0 7
      arch/frv/include/asm/dma-mapping.h
  5. 0 3
      arch/hexagon/include/asm/dma-mapping.h
  6. 0 11
      arch/ia64/include/asm/dma-mapping.h
  7. 0 5
      arch/m32r/include/asm/dma-mapping.h
  8. 0 6
      arch/m68k/include/asm/dma-mapping.h
  9. 0 10
      arch/metag/include/asm/dma-mapping.h
  10. 0 39
      arch/microblaze/include/asm/dma-mapping.h
  11. 17 0
      arch/microblaze/kernel/dma.c
  12. 0 3
      arch/mips/include/asm/dma-mapping.h
  13. 3 4
      arch/mips/mm/dma-default.c
  14. 0 10
      arch/mn10300/include/asm/dma-mapping.h
  15. 0 9
      arch/nios2/include/asm/dma-mapping.h
  16. 0 8
      arch/parisc/include/asm/dma-mapping.h
  17. 8 0
      arch/parisc/kernel/pci-dma.c
  18. 0 7
      arch/powerpc/include/asm/dma-mapping.h
  19. 0 2
      arch/powerpc/include/asm/floppy.h
  20. 0 5
      arch/s390/include/asm/dma-mapping.h
  21. 3 4
      arch/sh/include/asm/dma-mapping.h
  22. 9 8
      arch/sh/kernel/dma-nommu.c
  23. 3 3
      arch/sh/mm/consistent.c
  24. 0 8
      arch/sparc/include/asm/dma-mapping.h
  25. 0 1
      arch/sparc/include/asm/floppy_32.h
  26. 0 1
      arch/sparc/include/asm/floppy_64.h
  27. 0 9
      arch/tile/include/asm/dma-mapping.h
  28. 0 9
      arch/unicore32/include/asm/cacheflush.h
  29. 0 22
      arch/unicore32/include/asm/dma-mapping.h
  30. 0 3
      arch/unicore32/mm/proc-syms.c
  31. 0 7
      arch/x86/include/asm/dma-mapping.h
  32. 0 3
      arch/xtensa/include/asm/dma-mapping.h
  33. 0 23
      arch/xtensa/kernel/pci-dma.c
  34. 1 0
      drivers/amba/bus.c
  35. 1 0
      drivers/base/platform.c
  36. 4 0
      drivers/block/floppy.c
  37. 1 0
      drivers/gpu/host1x/bus.c
  38. 1 7
      drivers/of/device.c
  39. 1 0
      drivers/pci/pci-driver.c
  40. 2 3
      drivers/sh/maple/maple.c
  41. 4 0
      include/linux/device.h
  42. 13 0
      include/linux/dma-mapping.h

+ 0 - 2
arch/alpha/include/asm/dma-mapping.h

@@ -9,6 +9,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return dma_ops;
 	return dma_ops;
 }
 }
 
 
-#define dma_cache_sync(dev, va, size, dir)		  ((void)0)
-
 #endif	/* _ALPHA_DMA_MAPPING_H */
 #endif	/* _ALPHA_DMA_MAPPING_H */

+ 0 - 2
arch/alpha/include/asm/floppy.h

@@ -24,7 +24,6 @@
 #define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA,count)
 #define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA,count)
 #define fd_enable_irq()         enable_irq(FLOPPY_IRQ)
 #define fd_enable_irq()         enable_irq(FLOPPY_IRQ)
 #define fd_disable_irq()        disable_irq(FLOPPY_IRQ)
 #define fd_disable_irq()        disable_irq(FLOPPY_IRQ)
-#define fd_cacheflush(addr,size) /* nothing */
 #define fd_request_irq()        request_irq(FLOPPY_IRQ, floppy_interrupt,\
 #define fd_request_irq()        request_irq(FLOPPY_IRQ, floppy_interrupt,\
 					    0, "floppy", NULL)
 					    0, "floppy", NULL)
 #define fd_free_irq()           free_irq(FLOPPY_IRQ, NULL)
 #define fd_free_irq()           free_irq(FLOPPY_IRQ, NULL)
@@ -62,7 +61,6 @@ alpha_fd_dma_setup(char *addr, unsigned long size, int mode, int io)
 	prev_dir = dir;
 	prev_dir = dir;
 
 
 	fd_clear_dma_ff();
 	fd_clear_dma_ff();
-	fd_cacheflush(addr, size);
 	fd_set_dma_mode(mode);
 	fd_set_dma_mode(mode);
 	set_dma_addr(FLOPPY_DMA, bus_addr);
 	set_dma_addr(FLOPPY_DMA, bus_addr);
 	fd_set_dma_count(size);
 	fd_set_dma_count(size);

+ 0 - 6
arch/cris/include/asm/dma-mapping.h

@@ -17,10 +17,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 }
 }
 #endif
 #endif
 
 
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-	       enum dma_data_direction direction)
-{
-}
-
 #endif
 #endif

+ 0 - 7
arch/frv/include/asm/dma-mapping.h

@@ -15,11 +15,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return &frv_dma_ops;
 	return &frv_dma_ops;
 }
 }
 
 
-static inline
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-		    enum dma_data_direction direction)
-{
-	flush_write_buffers();
-}
-
 #endif  /* _ASM_DMA_MAPPING_H */
 #endif  /* _ASM_DMA_MAPPING_H */

+ 0 - 3
arch/hexagon/include/asm/dma-mapping.h

@@ -37,9 +37,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return dma_ops;
 	return dma_ops;
 }
 }
 
 
-extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-			   enum dma_data_direction direction);
-
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
 {
 	if (!dev->dma_mask)
 	if (!dev->dma_mask)

+ 0 - 11
arch/ia64/include/asm/dma-mapping.h

@@ -45,15 +45,4 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 	return daddr;
 	return daddr;
 }
 }
 
 
-static inline void
-dma_cache_sync (struct device *dev, void *vaddr, size_t size,
-	enum dma_data_direction dir)
-{
-	/*
-	 * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do need to
-	 * ensure that dma_cache_sync() enforces order, hence the mb().
-	 */
-	mb();
-}
-
 #endif /* _ASM_IA64_DMA_MAPPING_H */
 #endif /* _ASM_IA64_DMA_MAPPING_H */

+ 0 - 5
arch/m32r/include/asm/dma-mapping.h

@@ -14,11 +14,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return &dma_noop_ops;
 	return &dma_noop_ops;
 }
 }
 
 
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-				  enum dma_data_direction direction)
-{
-}
-
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
 {
 	if (!dev->dma_mask)
 	if (!dev->dma_mask)

+ 0 - 6
arch/m68k/include/asm/dma-mapping.h

@@ -9,10 +9,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
         return &m68k_dma_ops;
         return &m68k_dma_ops;
 }
 }
 
 
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-				  enum dma_data_direction dir)
-{
-	/* we use coherent allocation, so not much to do here. */
-}
-
 #endif  /* _M68K_DMA_MAPPING_H */
 #endif  /* _M68K_DMA_MAPPING_H */

+ 0 - 10
arch/metag/include/asm/dma-mapping.h

@@ -9,14 +9,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return &metag_dma_ops;
 	return &metag_dma_ops;
 }
 }
 
 
-/*
- * dma_alloc_attrs() always returns non-cacheable memory, so there's no need to
- * do any flushing here.
- */
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-	       enum dma_data_direction direction)
-{
-}
-
 #endif
 #endif

+ 0 - 39
arch/microblaze/include/asm/dma-mapping.h

@@ -15,22 +15,6 @@
 #ifndef _ASM_MICROBLAZE_DMA_MAPPING_H
 #ifndef _ASM_MICROBLAZE_DMA_MAPPING_H
 #define _ASM_MICROBLAZE_DMA_MAPPING_H
 #define _ASM_MICROBLAZE_DMA_MAPPING_H
 
 
-/*
- * See Documentation/DMA-API-HOWTO.txt and
- * Documentation/DMA-API.txt for documentation.
- */
-
-#include <linux/types.h>
-#include <linux/cache.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-debug.h>
-#include <asm/io.h>
-#include <asm/cacheflush.h>
-
-#define __dma_alloc_coherent(dev, gfp, size, handle)	NULL
-#define __dma_free_coherent(size, addr)		((void)0)
-
 /*
 /*
  * Available generic sets of operations
  * Available generic sets of operations
  */
  */
@@ -41,27 +25,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return &dma_direct_ops;
 	return &dma_direct_ops;
 }
 }
 
 
-static inline void __dma_sync(unsigned long paddr,
-			      size_t size, enum dma_data_direction direction)
-{
-	switch (direction) {
-	case DMA_TO_DEVICE:
-	case DMA_BIDIRECTIONAL:
-		flush_dcache_range(paddr, paddr + size);
-		break;
-	case DMA_FROM_DEVICE:
-		invalidate_dcache_range(paddr, paddr + size);
-		break;
-	default:
-		BUG();
-	}
-}
-
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-		enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	__dma_sync(virt_to_phys(vaddr), size, (int)direction);
-}
-
 #endif	/* _ASM_MICROBLAZE_DMA_MAPPING_H */
 #endif	/* _ASM_MICROBLAZE_DMA_MAPPING_H */

+ 17 - 0
arch/microblaze/kernel/dma.c

@@ -13,6 +13,7 @@
 #include <linux/dma-debug.h>
 #include <linux/dma-debug.h>
 #include <linux/export.h>
 #include <linux/export.h>
 #include <linux/bug.h>
 #include <linux/bug.h>
+#include <asm/cacheflush.h>
 
 
 #define NOT_COHERENT_CACHE
 #define NOT_COHERENT_CACHE
 
 
@@ -52,6 +53,22 @@ static void dma_direct_free_coherent(struct device *dev, size_t size,
 #endif
 #endif
 }
 }
 
 
+static inline void __dma_sync(unsigned long paddr,
+			      size_t size, enum dma_data_direction direction)
+{
+	switch (direction) {
+	case DMA_TO_DEVICE:
+	case DMA_BIDIRECTIONAL:
+		flush_dcache_range(paddr, paddr + size);
+		break;
+	case DMA_FROM_DEVICE:
+		invalidate_dcache_range(paddr, paddr + size);
+		break;
+	default:
+		BUG();
+	}
+}
+
 static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
 static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
 			     int nents, enum dma_data_direction direction,
 			     int nents, enum dma_data_direction direction,
 			     unsigned long attrs)
 			     unsigned long attrs)

+ 0 - 3
arch/mips/include/asm/dma-mapping.h

@@ -27,9 +27,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 
 
 static inline void dma_mark_clean(void *addr, size_t size) {}
 static inline void dma_mark_clean(void *addr, size_t size) {}
 
 
-extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-	       enum dma_data_direction direction);
-
 #define arch_setup_dma_ops arch_setup_dma_ops
 #define arch_setup_dma_ops arch_setup_dma_ops
 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
 				      u64 size, const struct iommu_ops *iommu,
 				      u64 size, const struct iommu_ops *iommu,

+ 3 - 4
arch/mips/mm/dma-default.c

@@ -383,7 +383,7 @@ static int mips_dma_supported(struct device *dev, u64 mask)
 	return plat_dma_supported(dev, mask);
 	return plat_dma_supported(dev, mask);
 }
 }
 
 
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+static void mips_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 			 enum dma_data_direction direction)
 			 enum dma_data_direction direction)
 {
 {
 	BUG_ON(direction == DMA_NONE);
 	BUG_ON(direction == DMA_NONE);
@@ -392,8 +392,6 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		__dma_sync_virtual(vaddr, size, direction);
 		__dma_sync_virtual(vaddr, size, direction);
 }
 }
 
 
-EXPORT_SYMBOL(dma_cache_sync);
-
 static const struct dma_map_ops mips_default_dma_map_ops = {
 static const struct dma_map_ops mips_default_dma_map_ops = {
 	.alloc = mips_dma_alloc_coherent,
 	.alloc = mips_dma_alloc_coherent,
 	.free = mips_dma_free_coherent,
 	.free = mips_dma_free_coherent,
@@ -407,7 +405,8 @@ static const struct dma_map_ops mips_default_dma_map_ops = {
 	.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
 	.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
 	.sync_sg_for_device = mips_dma_sync_sg_for_device,
 	.sync_sg_for_device = mips_dma_sync_sg_for_device,
 	.mapping_error = mips_dma_mapping_error,
 	.mapping_error = mips_dma_mapping_error,
-	.dma_supported = mips_dma_supported
+	.dma_supported = mips_dma_supported,
+	.cache_sync = mips_dma_cache_sync,
 };
 };
 
 
 const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
 const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;

+ 0 - 10
arch/mn10300/include/asm/dma-mapping.h

@@ -11,9 +11,6 @@
 #ifndef _ASM_DMA_MAPPING_H
 #ifndef _ASM_DMA_MAPPING_H
 #define _ASM_DMA_MAPPING_H
 #define _ASM_DMA_MAPPING_H
 
 
-#include <asm/cache.h>
-#include <asm/io.h>
-
 extern const struct dma_map_ops mn10300_dma_ops;
 extern const struct dma_map_ops mn10300_dma_ops;
 
 
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
@@ -21,11 +18,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return &mn10300_dma_ops;
 	return &mn10300_dma_ops;
 }
 }
 
 
-static inline
-void dma_cache_sync(void *vaddr, size_t size,
-		    enum dma_data_direction direction)
-{
-	mn10300_dcache_flush_inv();
-}
-
 #endif
 #endif

+ 0 - 9
arch/nios2/include/asm/dma-mapping.h

@@ -17,13 +17,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return &nios2_dma_ops;
 	return &nios2_dma_ops;
 }
 }
 
 
-/*
- * dma_alloc_attrs() always returns non-cacheable memory, so there's no need to
- * do any flushing here.
- */
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-				  enum dma_data_direction direction)
-{
-}
-
 #endif /* _ASM_NIOS2_DMA_MAPPING_H */
 #endif /* _ASM_NIOS2_DMA_MAPPING_H */

+ 0 - 8
arch/parisc/include/asm/dma-mapping.h

@@ -33,14 +33,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return hppa_dma_ops;
 	return hppa_dma_ops;
 }
 }
 
 
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-	       enum dma_data_direction direction)
-{
-	if (hppa_dma_ops->sync_single_for_cpu)
-		flush_kernel_dcache_range((unsigned long)vaddr, size);
-}
-
 static inline void *
 static inline void *
 parisc_walk_tree(struct device *dev)
 parisc_walk_tree(struct device *dev)
 {
 {

+ 8 - 0
arch/parisc/kernel/pci-dma.c

@@ -572,6 +572,12 @@ static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *
 		flush_kernel_vmap_range(sg_virt(sg), sg->length);
 		flush_kernel_vmap_range(sg_virt(sg), sg->length);
 }
 }
 
 
+static void pa11_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+	       enum dma_data_direction direction)
+{
+	flush_kernel_dcache_range((unsigned long)vaddr, size);
+}
+
 const struct dma_map_ops pcxl_dma_ops = {
 const struct dma_map_ops pcxl_dma_ops = {
 	.dma_supported =	pa11_dma_supported,
 	.dma_supported =	pa11_dma_supported,
 	.alloc =		pa11_dma_alloc,
 	.alloc =		pa11_dma_alloc,
@@ -584,6 +590,7 @@ const struct dma_map_ops pcxl_dma_ops = {
 	.sync_single_for_device = pa11_dma_sync_single_for_device,
 	.sync_single_for_device = pa11_dma_sync_single_for_device,
 	.sync_sg_for_cpu =	pa11_dma_sync_sg_for_cpu,
 	.sync_sg_for_cpu =	pa11_dma_sync_sg_for_cpu,
 	.sync_sg_for_device =	pa11_dma_sync_sg_for_device,
 	.sync_sg_for_device =	pa11_dma_sync_sg_for_device,
+	.cache_sync =		pa11_dma_cache_sync,
 };
 };
 
 
 static void *pcx_dma_alloc(struct device *dev, size_t size,
 static void *pcx_dma_alloc(struct device *dev, size_t size,
@@ -620,4 +627,5 @@ const struct dma_map_ops pcx_dma_ops = {
 	.sync_single_for_device = pa11_dma_sync_single_for_device,
 	.sync_single_for_device = pa11_dma_sync_single_for_device,
 	.sync_sg_for_cpu =	pa11_dma_sync_sg_for_cpu,
 	.sync_sg_for_cpu =	pa11_dma_sync_sg_for_cpu,
 	.sync_sg_for_device =	pa11_dma_sync_sg_for_device,
 	.sync_sg_for_device =	pa11_dma_sync_sg_for_device,
+	.cache_sync =		pa11_dma_cache_sync,
 };
 };

+ 0 - 7
arch/powerpc/include/asm/dma-mapping.h

@@ -142,12 +142,5 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 
 
 #define ARCH_HAS_DMA_MMAP_COHERENT
 #define ARCH_HAS_DMA_MMAP_COHERENT
 
 
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-		enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	__dma_sync(vaddr, size, (int)direction);
-}
-
 #endif /* __KERNEL__ */
 #endif /* __KERNEL__ */
 #endif	/* _ASM_DMA_MAPPING_H */
 #endif	/* _ASM_DMA_MAPPING_H */

+ 0 - 2
arch/powerpc/include/asm/floppy.h

@@ -25,7 +25,6 @@
 #define fd_get_dma_residue()    fd_ops->_get_dma_residue(FLOPPY_DMA)
 #define fd_get_dma_residue()    fd_ops->_get_dma_residue(FLOPPY_DMA)
 #define fd_enable_irq()         enable_irq(FLOPPY_IRQ)
 #define fd_enable_irq()         enable_irq(FLOPPY_IRQ)
 #define fd_disable_irq()        disable_irq(FLOPPY_IRQ)
 #define fd_disable_irq()        disable_irq(FLOPPY_IRQ)
-#define fd_cacheflush(addr,size) /* nothing */
 #define fd_free_irq()           free_irq(FLOPPY_IRQ, NULL);
 #define fd_free_irq()           free_irq(FLOPPY_IRQ, NULL);
 
 
 #include <linux/pci.h>
 #include <linux/pci.h>
@@ -152,7 +151,6 @@ static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
 	prev_dir = dir;
 	prev_dir = dir;
 
 
 	fd_clear_dma_ff();
 	fd_clear_dma_ff();
-	fd_cacheflush(addr, size);
 	fd_set_dma_mode(mode);
 	fd_set_dma_mode(mode);
 	set_dma_addr(FLOPPY_DMA, bus_addr);
 	set_dma_addr(FLOPPY_DMA, bus_addr);
 	fd_set_dma_count(size);
 	fd_set_dma_count(size);

+ 0 - 5
arch/s390/include/asm/dma-mapping.h

@@ -16,11 +16,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return &dma_noop_ops;
 	return &dma_noop_ops;
 }
 }
 
 
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-				  enum dma_data_direction direction)
-{
-}
-
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
 {
 	if (!dev->dma_mask)
 	if (!dev->dma_mask)

+ 3 - 4
arch/sh/include/asm/dma-mapping.h

@@ -10,10 +10,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return dma_ops;
 	return dma_ops;
 }
 }
 
 
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-		    enum dma_data_direction dir);
-
-/* arch/sh/mm/consistent.c */
 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 					dma_addr_t *dma_addr, gfp_t flag,
 					dma_addr_t *dma_addr, gfp_t flag,
 					unsigned long attrs);
 					unsigned long attrs);
@@ -21,4 +17,7 @@ extern void dma_generic_free_coherent(struct device *dev, size_t size,
 				      void *vaddr, dma_addr_t dma_handle,
 				      void *vaddr, dma_addr_t dma_handle,
 				      unsigned long attrs);
 				      unsigned long attrs);
 
 
+void sh_sync_dma_for_device(void *vaddr, size_t size,
+	    enum dma_data_direction dir);
+
 #endif /* __ASM_SH_DMA_MAPPING_H */
 #endif /* __ASM_SH_DMA_MAPPING_H */

+ 9 - 8
arch/sh/kernel/dma-nommu.c

@@ -9,6 +9,7 @@
  */
  */
 #include <linux/dma-mapping.h>
 #include <linux/dma-mapping.h>
 #include <linux/io.h>
 #include <linux/io.h>
+#include <asm/cacheflush.h>
 
 
 static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
 static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
 				 unsigned long offset, size_t size,
 				 unsigned long offset, size_t size,
@@ -20,7 +21,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
 	WARN_ON(size == 0);
 	WARN_ON(size == 0);
 
 
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		dma_cache_sync(dev, page_address(page) + offset, size, dir);
+		sh_sync_dma_for_device(page_address(page) + offset, size, dir);
 
 
 	return addr;
 	return addr;
 }
 }
@@ -38,7 +39,7 @@ static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
 		BUG_ON(!sg_page(s));
 		BUG_ON(!sg_page(s));
 
 
 		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-			dma_cache_sync(dev, sg_virt(s), s->length, dir);
+			sh_sync_dma_for_device(sg_virt(s), s->length, dir);
 
 
 		s->dma_address = sg_phys(s);
 		s->dma_address = sg_phys(s);
 		s->dma_length = s->length;
 		s->dma_length = s->length;
@@ -48,20 +49,20 @@ static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
 }
 }
 
 
 #ifdef CONFIG_DMA_NONCOHERENT
 #ifdef CONFIG_DMA_NONCOHERENT
-static void nommu_sync_single(struct device *dev, dma_addr_t addr,
+static void nommu_sync_single_for_device(struct device *dev, dma_addr_t addr,
 			      size_t size, enum dma_data_direction dir)
 			      size_t size, enum dma_data_direction dir)
 {
 {
-	dma_cache_sync(dev, phys_to_virt(addr), size, dir);
+	sh_sync_dma_for_device(phys_to_virt(addr), size, dir);
 }
 }
 
 
-static void nommu_sync_sg(struct device *dev, struct scatterlist *sg,
+static void nommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 			  int nelems, enum dma_data_direction dir)
 			  int nelems, enum dma_data_direction dir)
 {
 {
 	struct scatterlist *s;
 	struct scatterlist *s;
 	int i;
 	int i;
 
 
 	for_each_sg(sg, s, nelems, i)
 	for_each_sg(sg, s, nelems, i)
-		dma_cache_sync(dev, sg_virt(s), s->length, dir);
+		sh_sync_dma_for_device(sg_virt(s), s->length, dir);
 }
 }
 #endif
 #endif
 
 
@@ -71,8 +72,8 @@ const struct dma_map_ops nommu_dma_ops = {
 	.map_page		= nommu_map_page,
 	.map_page		= nommu_map_page,
 	.map_sg			= nommu_map_sg,
 	.map_sg			= nommu_map_sg,
 #ifdef CONFIG_DMA_NONCOHERENT
 #ifdef CONFIG_DMA_NONCOHERENT
-	.sync_single_for_device	= nommu_sync_single,
-	.sync_sg_for_device	= nommu_sync_sg,
+	.sync_single_for_device	= nommu_sync_single_for_device,
+	.sync_sg_for_device	= nommu_sync_sg_for_device,
 #endif
 #endif
 	.is_phys		= 1,
 	.is_phys		= 1,
 };
 };

+ 3 - 3
arch/sh/mm/consistent.c

@@ -49,7 +49,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 	 * Pages from the page allocator may have data present in
 	 * Pages from the page allocator may have data present in
 	 * cache. So flush the cache before using uncached memory.
 	 * cache. So flush the cache before using uncached memory.
 	 */
 	 */
-	dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);
+	sh_sync_dma_for_device(ret, size, DMA_BIDIRECTIONAL);
 
 
 	ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
 	ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
 	if (!ret_nocache) {
 	if (!ret_nocache) {
@@ -78,7 +78,7 @@ void dma_generic_free_coherent(struct device *dev, size_t size,
 	iounmap(vaddr);
 	iounmap(vaddr);
 }
 }
 
 
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+void sh_sync_dma_for_device(void *vaddr, size_t size,
 		    enum dma_data_direction direction)
 		    enum dma_data_direction direction)
 {
 {
 	void *addr;
 	void *addr;
@@ -100,7 +100,7 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		BUG();
 		BUG();
 	}
 	}
 }
 }
-EXPORT_SYMBOL(dma_cache_sync);
+EXPORT_SYMBOL(sh_sync_dma_for_device);
 
 
 static int __init memchunk_setup(char *str)
 static int __init memchunk_setup(char *str)
 {
 {

+ 0 - 8
arch/sparc/include/asm/dma-mapping.h

@@ -6,14 +6,6 @@
 #include <linux/mm.h>
 #include <linux/mm.h>
 #include <linux/dma-debug.h>
 #include <linux/dma-debug.h>
 
 
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-				  enum dma_data_direction dir)
-{
-	/* Since dma_{alloc,free}_noncoherent() allocated coherent memory, this
-	 * routine can be a nop.
-	 */
-}
-
 extern const struct dma_map_ops *dma_ops;
 extern const struct dma_map_ops *dma_ops;
 extern const struct dma_map_ops pci32_dma_ops;
 extern const struct dma_map_ops pci32_dma_ops;
 
 

+ 0 - 1
arch/sparc/include/asm/floppy_32.h

@@ -71,7 +71,6 @@ static struct sun_floppy_ops sun_fdops;
 #define fd_set_dma_count(count)   sun_fd_set_dma_count(count)
 #define fd_set_dma_count(count)   sun_fd_set_dma_count(count)
 #define fd_enable_irq()           /* nothing... */
 #define fd_enable_irq()           /* nothing... */
 #define fd_disable_irq()          /* nothing... */
 #define fd_disable_irq()          /* nothing... */
-#define fd_cacheflush(addr, size) /* nothing... */
 #define fd_request_irq()          sun_fd_request_irq()
 #define fd_request_irq()          sun_fd_request_irq()
 #define fd_free_irq()             /* nothing... */
 #define fd_free_irq()             /* nothing... */
 #if 0  /* P3: added by Alain, these cause a MMU corruption. 19960524 XXX */
 #if 0  /* P3: added by Alain, these cause a MMU corruption. 19960524 XXX */

+ 0 - 1
arch/sparc/include/asm/floppy_64.h

@@ -73,7 +73,6 @@ static struct sun_floppy_ops sun_fdops;
 #define fd_set_dma_addr(addr)     sun_fdops.fd_set_dma_addr(addr)
 #define fd_set_dma_addr(addr)     sun_fdops.fd_set_dma_addr(addr)
 #define fd_set_dma_count(count)   sun_fdops.fd_set_dma_count(count)
 #define fd_set_dma_count(count)   sun_fdops.fd_set_dma_count(count)
 #define get_dma_residue(x)        sun_fdops.get_dma_residue()
 #define get_dma_residue(x)        sun_fdops.get_dma_residue()
-#define fd_cacheflush(addr, size) /* nothing... */
 #define fd_request_irq()          sun_fdops.fd_request_irq()
 #define fd_request_irq()          sun_fdops.fd_request_irq()
 #define fd_free_irq()             sun_fdops.fd_free_irq()
 #define fd_free_irq()             sun_fdops.fd_free_irq()
 #define fd_eject(drive)           sun_fdops.fd_eject(drive)
 #define fd_eject(drive)           sun_fdops.fd_eject(drive)

+ 0 - 9
arch/tile/include/asm/dma-mapping.h

@@ -67,13 +67,4 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 #define HAVE_ARCH_DMA_SET_MASK 1
 #define HAVE_ARCH_DMA_SET_MASK 1
 int dma_set_mask(struct device *dev, u64 mask);
 int dma_set_mask(struct device *dev, u64 mask);
 
 
-/*
- * dma_alloc_attrs() always returns non-cacheable memory, so there's no need to
- * do any flushing here.
- */
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-				  enum dma_data_direction direction)
-{
-}
-
 #endif /* _ASM_TILE_DMA_MAPPING_H */
 #endif /* _ASM_TILE_DMA_MAPPING_H */

+ 0 - 9
arch/unicore32/include/asm/cacheflush.h

@@ -101,15 +101,6 @@ extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
 extern void __cpuc_flush_dcache_area(void *, size_t);
 extern void __cpuc_flush_dcache_area(void *, size_t);
 extern void __cpuc_flush_kern_dcache_area(void *addr, size_t size);
 extern void __cpuc_flush_kern_dcache_area(void *addr, size_t size);
 
 
-/*
- * These are private to the dma-mapping API.  Do not use directly.
- * Their sole purpose is to ensure that data held in the cache
- * is visible to DMA, or data written by DMA to system memory is
- * visible to the CPU.
- */
-extern void __cpuc_dma_clean_range(unsigned long, unsigned long);
-extern void __cpuc_dma_flush_range(unsigned long, unsigned long);
-
 /*
 /*
  * Copy user data from/to a page which is mapped into a different
  * Copy user data from/to a page which is mapped into a different
  * processes address space.  Really, we want to allow our "user
  * processes address space.  Really, we want to allow our "user

+ 0 - 22
arch/unicore32/include/asm/dma-mapping.h

@@ -18,9 +18,6 @@
 #include <linux/scatterlist.h>
 #include <linux/scatterlist.h>
 #include <linux/swiotlb.h>
 #include <linux/swiotlb.h>
 
 
-#include <asm/memory.h>
-#include <asm/cacheflush.h>
-
 extern const struct dma_map_ops swiotlb_dma_map_ops;
 extern const struct dma_map_ops swiotlb_dma_map_ops;
 
 
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
@@ -48,24 +45,5 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 
 
 static inline void dma_mark_clean(void *addr, size_t size) {}
 static inline void dma_mark_clean(void *addr, size_t size) {}
 
 
-static inline void dma_cache_sync(struct device *dev, void *vaddr,
-		size_t size, enum dma_data_direction direction)
-{
-	unsigned long start = (unsigned long)vaddr;
-	unsigned long end   = start + size;
-
-	switch (direction) {
-	case DMA_NONE:
-		BUG();
-	case DMA_FROM_DEVICE:
-	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
-		__cpuc_dma_flush_range(start, end);
-		break;
-	case DMA_TO_DEVICE:		/* writeback only */
-		__cpuc_dma_clean_range(start, end);
-		break;
-	}
-}
-
 #endif /* __KERNEL__ */
 #endif /* __KERNEL__ */
 #endif
 #endif

+ 0 - 3
arch/unicore32/mm/proc-syms.c

@@ -20,6 +20,3 @@ EXPORT_SYMBOL(cpu_dcache_clean_area);
 EXPORT_SYMBOL(cpu_set_pte);
 EXPORT_SYMBOL(cpu_set_pte);
 
 
 EXPORT_SYMBOL(__cpuc_coherent_kern_range);
 EXPORT_SYMBOL(__cpuc_coherent_kern_range);
-
-EXPORT_SYMBOL(__cpuc_dma_flush_range);
-EXPORT_SYMBOL(__cpuc_dma_clean_range);

+ 0 - 7
arch/x86/include/asm/dma-mapping.h

@@ -68,13 +68,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 }
 }
 #endif /* CONFIG_X86_DMA_REMAP */
 #endif /* CONFIG_X86_DMA_REMAP */
 
 
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-	enum dma_data_direction dir)
-{
-	flush_write_buffers();
-}
-
 static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
 static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
 						    gfp_t gfp)
 						    gfp_t gfp)
 {
 {

+ 0 - 3
arch/xtensa/include/asm/dma-mapping.h

@@ -23,9 +23,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return &xtensa_dma_map_ops;
 	return &xtensa_dma_map_ops;
 }
 }
 
 
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-		    enum dma_data_direction direction);
-
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
 {
 	return (dma_addr_t)paddr;
 	return (dma_addr_t)paddr;

+ 0 - 23
arch/xtensa/kernel/pci-dma.c

@@ -26,29 +26,6 @@
 #include <asm/cacheflush.h>
 #include <asm/cacheflush.h>
 #include <asm/io.h>
 #include <asm/io.h>
 
 
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-		    enum dma_data_direction dir)
-{
-	switch (dir) {
-	case DMA_BIDIRECTIONAL:
-		__flush_invalidate_dcache_range((unsigned long)vaddr, size);
-		break;
-
-	case DMA_FROM_DEVICE:
-		__invalidate_dcache_range((unsigned long)vaddr, size);
-		break;
-
-	case DMA_TO_DEVICE:
-		__flush_dcache_range((unsigned long)vaddr, size);
-		break;
-
-	case DMA_NONE:
-		BUG();
-		break;
-	}
-}
-EXPORT_SYMBOL(dma_cache_sync);
-
 static void do_cache_op(dma_addr_t dma_handle, size_t size,
 static void do_cache_op(dma_addr_t dma_handle, size_t size,
 			void (*fn)(unsigned long, unsigned long))
 			void (*fn)(unsigned long, unsigned long))
 {
 {

+ 1 - 0
drivers/amba/bus.c

@@ -195,6 +195,7 @@ struct bus_type amba_bustype = {
 	.match		= amba_match,
 	.match		= amba_match,
 	.uevent		= amba_uevent,
 	.uevent		= amba_uevent,
 	.pm		= &amba_pm,
 	.pm		= &amba_pm,
+	.force_dma	= true,
 };
 };
 
 
 static int __init amba_init(void)
 static int __init amba_init(void)

+ 1 - 0
drivers/base/platform.c

@@ -1143,6 +1143,7 @@ struct bus_type platform_bus_type = {
 	.match		= platform_match,
 	.match		= platform_match,
 	.uevent		= platform_uevent,
 	.uevent		= platform_uevent,
 	.pm		= &platform_dev_pm_ops,
 	.pm		= &platform_dev_pm_ops,
+	.force_dma	= true,
 };
 };
 EXPORT_SYMBOL_GPL(platform_bus_type);
 EXPORT_SYMBOL_GPL(platform_bus_type);
 
 

+ 4 - 0
drivers/block/floppy.c

@@ -275,6 +275,10 @@ static int set_next_request(void);
 #define fd_dma_mem_alloc(size) __get_dma_pages(GFP_KERNEL, get_order(size))
 #define fd_dma_mem_alloc(size) __get_dma_pages(GFP_KERNEL, get_order(size))
 #endif
 #endif
 
 
+#ifndef fd_cacheflush
+#define fd_cacheflush(addr, size) /* nothing... */
+#endif
+
 static inline void fallback_on_nodma_alloc(char **addr, size_t l)
 static inline void fallback_on_nodma_alloc(char **addr, size_t l)
 {
 {
 #ifdef FLOPPY_CAN_FALLBACK_ON_NODMA
 #ifdef FLOPPY_CAN_FALLBACK_ON_NODMA

+ 1 - 0
drivers/gpu/host1x/bus.c

@@ -320,6 +320,7 @@ struct bus_type host1x_bus_type = {
 	.name = "host1x",
 	.name = "host1x",
 	.match = host1x_device_match,
 	.match = host1x_device_match,
 	.pm = &host1x_device_pm_ops,
 	.pm = &host1x_device_pm_ops,
+	.force_dma = true,
 };
 };
 
 
 static void __host1x_device_del(struct host1x_device *device)
 static void __host1x_device_del(struct host1x_device *device)

+ 1 - 7
drivers/of/device.c

@@ -9,9 +9,7 @@
 #include <linux/module.h>
 #include <linux/module.h>
 #include <linux/mod_devicetable.h>
 #include <linux/mod_devicetable.h>
 #include <linux/slab.h>
 #include <linux/slab.h>
-#include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/platform_device.h>
-#include <linux/amba/bus.h>
 
 
 #include <asm/errno.h>
 #include <asm/errno.h>
 #include "of_private.h"
 #include "of_private.h"
@@ -101,11 +99,7 @@ int of_dma_configure(struct device *dev, struct device_node *np)
 		 * DMA configuration regardless of whether "dma-ranges" is
 		 * DMA configuration regardless of whether "dma-ranges" is
 		 * correctly specified or not.
 		 * correctly specified or not.
 		 */
 		 */
-		if (!dev_is_pci(dev) &&
-#ifdef CONFIG_ARM_AMBA
-		    dev->bus != &amba_bustype &&
-#endif
-		    dev->bus != &platform_bus_type)
+		if (!dev->bus->force_dma)
 			return ret == -ENODEV ? 0 : ret;
 			return ret == -ENODEV ? 0 : ret;
 
 
 		dma_addr = offset = 0;
 		dma_addr = offset = 0;

+ 1 - 0
drivers/pci/pci-driver.c

@@ -1516,6 +1516,7 @@ struct bus_type pci_bus_type = {
 	.drv_groups	= pci_drv_groups,
 	.drv_groups	= pci_drv_groups,
 	.pm		= PCI_PM_OPS_PTR,
 	.pm		= PCI_PM_OPS_PTR,
 	.num_vf		= pci_bus_num_vf,
 	.num_vf		= pci_bus_num_vf,
+	.force_dma	= true,
 };
 };
 EXPORT_SYMBOL(pci_bus_type);
 EXPORT_SYMBOL(pci_bus_type);
 
 

+ 2 - 3
drivers/sh/maple/maple.c

@@ -300,7 +300,7 @@ static void maple_send(void)
 	mutex_unlock(&maple_wlist_lock);
 	mutex_unlock(&maple_wlist_lock);
 	if (maple_packets > 0) {
 	if (maple_packets > 0) {
 		for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
 		for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
-			dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE,
+			sh_sync_dma_for_device(maple_sendbuf + i * PAGE_SIZE,
 				       PAGE_SIZE, DMA_BIDIRECTIONAL);
 				       PAGE_SIZE, DMA_BIDIRECTIONAL);
 	}
 	}
 
 
@@ -642,8 +642,7 @@ static void maple_dma_handler(struct work_struct *work)
 		list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
 		list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
 			mdev = mq->dev;
 			mdev = mq->dev;
 			recvbuf = mq->recvbuf->buf;
 			recvbuf = mq->recvbuf->buf;
-			dma_cache_sync(&mdev->dev, recvbuf, 0x400,
-				DMA_FROM_DEVICE);
+			sh_sync_dma_for_device(recvbuf, 0x400, DMA_FROM_DEVICE);
 			code = recvbuf[0];
 			code = recvbuf[0];
 			kfree(mq->sendbuf);
 			kfree(mq->sendbuf);
 			list_del_init(&mq->list);
 			list_del_init(&mq->list);

+ 4 - 0
include/linux/device.h

@@ -97,6 +97,8 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
  * @p:		The private data of the driver core, only the driver core can
  * @p:		The private data of the driver core, only the driver core can
  *		touch this.
  *		touch this.
  * @lock_key:	Lock class key for use by the lock validator
  * @lock_key:	Lock class key for use by the lock validator
+ * @force_dma:	Assume devices on this bus should be set up by dma_configure()
+ * 		even if DMA capability is not explicitly described by firmware.
  *
  *
  * A bus is a channel between the processor and one or more devices. For the
  * A bus is a channel between the processor and one or more devices. For the
  * purposes of the device model, all devices are connected via a bus, even if
  * purposes of the device model, all devices are connected via a bus, even if
@@ -135,6 +137,8 @@ struct bus_type {
 
 
 	struct subsys_private *p;
 	struct subsys_private *p;
 	struct lock_class_key lock_key;
 	struct lock_class_key lock_key;
+
+	bool force_dma;
 };
 };
 
 
 extern int __must_check bus_register(struct bus_type *bus);
 extern int __must_check bus_register(struct bus_type *bus);

+ 13 - 0
include/linux/dma-mapping.h

@@ -127,6 +127,8 @@ struct dma_map_ops {
 	void (*sync_sg_for_device)(struct device *dev,
 	void (*sync_sg_for_device)(struct device *dev,
 				   struct scatterlist *sg, int nents,
 				   struct scatterlist *sg, int nents,
 				   enum dma_data_direction dir);
 				   enum dma_data_direction dir);
+	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
+			enum dma_data_direction direction);
 	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
 	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
 	int (*dma_supported)(struct device *dev, u64 mask);
 	int (*dma_supported)(struct device *dev, u64 mask);
 #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
 #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
@@ -437,6 +439,17 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 #define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
 #define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
 #define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
 #define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
 
 
+static inline void
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+		enum dma_data_direction dir)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops->cache_sync)
+		ops->cache_sync(dev, vaddr, size, dir);
+}
+
 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
 			   void *cpu_addr, dma_addr_t dma_addr, size_t size);