@@ -298,37 +298,19 @@ static void *
 __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
 		  const void *caller)
 {
-	struct vm_struct *area;
-	unsigned long addr;
-
 	/*
 	 * DMA allocation can be mapped to user space, so lets
 	 * set VM_USERMAP flags too.
 	 */
-	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
-				  caller);
-	if (!area)
-		return NULL;
-	addr = (unsigned long)area->addr;
-	area->phys_addr = __pfn_to_phys(page_to_pfn(page));
-
-	if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
-		vunmap((void *)addr);
-		return NULL;
-	}
-	return (void *)addr;
+	return dma_common_contiguous_remap(page, size,
+			VM_ARM_DMA_CONSISTENT | VM_USERMAP,
+			prot, caller);
 }
 
 static void __dma_free_remap(void *cpu_addr, size_t size)
 {
-	unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
-	struct vm_struct *area = find_vm_area(cpu_addr);
-	if (!area || (area->flags & flags) != flags) {
-		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
-		return;
-	}
-	unmap_kernel_range((unsigned long)cpu_addr, size);
-	vunmap(cpu_addr);
+	dma_common_free_remap(cpu_addr, size,
+			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
 }
 
 #define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
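For reference, here is a minimal sketch of what the two common helpers called
above are expected to do, reconstructed directly from the ARM-specific code
this hunk removes. The names match the helpers in the diff, but the bodies are
illustrative only; the generic implementations in the common DMA code may
differ (the real dma_common_contiguous_remap() could, for example, remap the
contiguous range page by page):

/*
 * Sketch only: behaviour inferred from the removed ARM code. The vm_flags
 * argument generalises the hard-coded VM_ARM_DMA_CONSISTENT | VM_USERMAP.
 */
void *dma_common_contiguous_remap(struct page *page, size_t size,
				  unsigned long vm_flags, pgprot_t prot,
				  const void *caller)
{
	struct vm_struct *area;
	unsigned long addr;

	area = get_vm_area_caller(size, vm_flags, caller);
	if (!area)
		return NULL;

	addr = (unsigned long)area->addr;
	area->phys_addr = __pfn_to_phys(page_to_pfn(page));

	/* Map the contiguous physical range into the new vmalloc area. */
	if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
		vunmap((void *)addr);
		return NULL;
	}
	return (void *)addr;
}

void dma_common_free_remap(void *cpu_addr, size_t size,
			   unsigned long vm_flags)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	/* Refuse to free areas that were not created with these flags. */
	if (!area || (area->flags & vm_flags) != vm_flags) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}
	unmap_kernel_range((unsigned long)cpu_addr, size);
	vunmap(cpu_addr);
}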
@@ -1271,29 +1253,8 @@ static void *
 __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
 		    const void *caller)
 {
-	unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	struct vm_struct *area;
-	unsigned long p;
-
-	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
-				  caller);
-	if (!area)
-		return NULL;
-
-	area->pages = pages;
-	area->nr_pages = nr_pages;
-	p = (unsigned long)area->addr;
-
-	for (i = 0; i < nr_pages; i++) {
-		phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
-		if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
-			goto err;
-		p += PAGE_SIZE;
-	}
-	return area->addr;
-err:
-	unmap_kernel_range((unsigned long)area->addr, size);
-	vunmap(area->addr);
-	return NULL;
+	return dma_common_pages_remap(pages, size,
+			VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
 }
 
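Similarly, a sketch of dma_common_pages_remap(), again reconstructed from the
removed __iommu_alloc_remap() body rather than taken from the generic
implementation; it remaps an array of (possibly discontiguous) pages into one
virtually contiguous kernel mapping:

/* Sketch only: behaviour inferred from the removed ARM code. */
void *dma_common_pages_remap(struct page **pages, size_t size,
			     unsigned long vm_flags, pgprot_t prot,
			     const void *caller)
{
	unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area;
	unsigned long p;

	area = get_vm_area_caller(size, vm_flags, caller);
	if (!area)
		return NULL;

	area->pages = pages;
	area->nr_pages = nr_pages;
	p = (unsigned long)area->addr;

	/* Map each page at the next PAGE_SIZE slot of the vmalloc area. */
	for (i = 0; i < nr_pages; i++) {
		phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
		if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
			goto err;
		p += PAGE_SIZE;
	}
	return area->addr;
err:
	/* Tear down whatever was mapped before the failure. */
	unmap_kernel_range((unsigned long)area->addr, size);
	vunmap(area->addr);
	return NULL;
}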
@@ -1501,8 +1462,8 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	}
 
 	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
-		unmap_kernel_range((unsigned long)cpu_addr, size);
-		vunmap(cpu_addr);
+		dma_common_free_remap(cpu_addr, size,
+				VM_ARM_DMA_CONSISTENT | VM_USERMAP);
 	}
 
 	__iommu_remove_mapping(dev, handle, size);
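The same pairing rule applies here: the vm_flags passed to
dma_common_free_remap() must match the flags the mapping was created with,
otherwise (per the sketch above) the free is rejected with a WARN. A
hypothetical caller would pair the helpers like this:

/* Hypothetical usage, mirroring the flags used throughout this patch. */
void *cpu_addr = dma_common_contiguous_remap(page, size,
		VM_ARM_DMA_CONSISTENT | VM_USERMAP,
		PAGE_KERNEL, __builtin_return_address(0));
if (cpu_addr) {
	/* ... use the kernel mapping ... */
	dma_common_free_remap(cpu_addr, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
}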