@@ -303,9 +303,10 @@ static void __swiotlb_sync_sg_for_device(struct device *dev,
 				       sg->length, dir);
 }
 
-/* vma->vm_page_prot must be set appropriately before calling this function */
-static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
-			     void *cpu_addr, dma_addr_t dma_addr, size_t size)
+static int __swiotlb_mmap(struct device *dev,
+			  struct vm_area_struct *vma,
+			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
+			  struct dma_attrs *attrs)
 {
 	int ret = -ENXIO;
 	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
@@ -314,6 +315,9 @@ static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
 	unsigned long off = vma->vm_pgoff;
 
+	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+					     is_device_dma_coherent(dev));
+
 	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
@@ -327,16 +331,6 @@ static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 	return ret;
 }
 
-static int __swiotlb_mmap(struct device *dev,
-			  struct vm_area_struct *vma,
-			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
-			  struct dma_attrs *attrs)
-{
-	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
-					     is_device_dma_coherent(dev));
-	return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-
 static struct dma_map_ops swiotlb_dma_ops = {
 	.alloc = __dma_alloc,
 	.free = __dma_free,