@@ -126,6 +126,8 @@ struct dma_map_ops {
 	void (*sync_sg_for_device)(struct device *dev,
 				   struct scatterlist *sg, int nents,
 				   enum dma_data_direction dir);
+	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
+			enum dma_data_direction direction);
 	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
 	int (*dma_supported)(struct device *dev, u64 mask);
 #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
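The hunk above only declares the new hook in struct dma_map_ops; each
architecture that maintains cache coherency in software supplies the
implementation. As a minimal sketch (not part of this patch; the
example_arch_* names are invented for illustration), the wiring could
look like this:

#include <linux/dma-mapping.h>

/*
 * Hypothetical architecture glue: example_arch_wback_inv() stands in
 * for the arch's real cache writeback/invalidate primitive.
 */
extern void example_arch_wback_inv(void *vaddr, size_t size,
		enum dma_data_direction direction);

static void example_arch_cache_sync(struct device *dev, void *vaddr,
		size_t size, enum dma_data_direction direction)
{
	/* Write back and/or discard the CPU cache lines covering
	 * [vaddr, vaddr + size) as required by the DMA direction. */
	example_arch_wback_inv(vaddr, size, direction);
}

const struct dma_map_ops example_arch_dma_ops = {
	/* ... map_page, sync_sg_for_device and friends elided ... */
	.cache_sync	= example_arch_cache_sync,
};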
@@ -436,6 +438,17 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 #define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
 #define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
 
+static inline void
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+		enum dma_data_direction dir)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops->cache_sync)
+		ops->cache_sync(dev, vaddr, size, dir);
+}
+
 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 			void *cpu_addr, dma_addr_t dma_addr, size_t size);
 
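The dma_cache_sync() wrapper added above dispatches to the per-device
ops and degrades to a no-op when ->cache_sync is not set, i.e. on
cache-coherent architectures. As a usage sketch (hypothetical driver
code, not from this patch; example_rx() is an invented name, and
dma_alloc_noncoherent()/dma_free_noncoherent() are assumed to be
available for the device):

#include <linux/dma-mapping.h>

/* Hypothetical driver fragment: receive device data into a
 * non-coherent buffer, then make it visible to the CPU. */
static int example_rx(struct device *dev, size_t size)
{
	dma_addr_t dma_handle;
	void *buf;

	buf = dma_alloc_noncoherent(dev, size, &dma_handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... program the device to DMA into dma_handle and wait ... */

	/* Invalidate stale CPU cache lines before reading the buffer;
	 * this is a no-op where ops->cache_sync is NULL. */
	dma_cache_sync(dev, buf, size, DMA_FROM_DEVICE);

	/* ... the CPU may now safely read buf ... */

	dma_free_noncoherent(dev, size, buf, dma_handle);
	return 0;
}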