@@ -49,6 +49,7 @@ struct arm_dma_alloc_args {
 	pgprot_t prot;
 	const void *caller;
 	bool want_vaddr;
+	int coherent_flag;
 };
 
 struct arm_dma_free_args {
@@ -59,6 +60,9 @@ struct arm_dma_free_args {
 	bool want_vaddr;
 };
 
+#define NORMAL	0
+#define COHERENT	1
+
 struct arm_dma_allocator {
 	void *(*alloc)(struct arm_dma_alloc_args *args,
 		       struct page **ret_page);
@@ -272,7 +276,7 @@ static u64 get_coherent_dma_mask(struct device *dev)
 	return mask;
 }
 
-static void __dma_clear_buffer(struct page *page, size_t size)
+static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
 {
 	/*
 	 * Ensure that the allocated pages are zeroed, and that any data
@@ -284,17 +288,21 @@ static void __dma_clear_buffer(struct page *page, size_t size)
 		while (size > 0) {
 			void *ptr = kmap_atomic(page);
 			memset(ptr, 0, PAGE_SIZE);
-			dmac_flush_range(ptr, ptr + PAGE_SIZE);
+			if (coherent_flag != COHERENT)
+				dmac_flush_range(ptr, ptr + PAGE_SIZE);
 			kunmap_atomic(ptr);
 			page++;
 			size -= PAGE_SIZE;
 		}
-		outer_flush_range(base, end);
+		if (coherent_flag != COHERENT)
+			outer_flush_range(base, end);
 	} else {
 		void *ptr = page_address(page);
 		memset(ptr, 0, size);
-		dmac_flush_range(ptr, ptr + size);
-		outer_flush_range(__pa(ptr), __pa(ptr) + size);
+		if (coherent_flag != COHERENT) {
+			dmac_flush_range(ptr, ptr + size);
+			outer_flush_range(__pa(ptr), __pa(ptr) + size);
+		}
 	}
 }
 
@@ -302,7 +310,8 @@ static void __dma_clear_buffer(struct page *page, size_t size)
  * Allocate a DMA buffer for 'dev' of size 'size' using the
  * specified gfp mask. Note that 'size' must be page aligned.
  */
-static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
+				       gfp_t gfp, int coherent_flag)
 {
 	unsigned long order = get_order(size);
 	struct page *page, *p, *e;
@@ -318,7 +327,7 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
 	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
 		__free_page(p);
 
-	__dma_clear_buffer(page, size);
+	__dma_clear_buffer(page, size, coherent_flag);
 
 	return page;
 }
@@ -340,7 +349,8 @@ static void __dma_free_buffer(struct page *page, size_t size)
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
 				     pgprot_t prot, struct page **ret_page,
-				     const void *caller, bool want_vaddr);
+				     const void *caller, bool want_vaddr,
+				     int coherent_flag);
 
 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
 				 pgprot_t prot, struct page **ret_page,
@@ -405,10 +415,13 @@ static int __init atomic_pool_init(void)
 	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
 	if (!atomic_pool)
 		goto out;
-
+	/*
+	 * The atomic pool is only used for non-coherent allocations
+	 * so we must pass NORMAL for coherent_flag.
+	 */
 	if (dev_get_cma_area(NULL))
 		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
-					      &page, atomic_pool_init, true);
+					      &page, atomic_pool_init, true, NORMAL);
 	else
 		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
 					   &page, atomic_pool_init, true);
@@ -522,7 +535,11 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
 {
 	struct page *page;
 	void *ptr = NULL;
-	page = __dma_alloc_buffer(dev, size, gfp);
+	/*
+	 * __alloc_remap_buffer is only called when the device is
+	 * non-coherent
+	 */
+	page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
 	if (!page)
 		return NULL;
 	if (!want_vaddr)
@@ -577,7 +594,8 @@ static int __free_from_pool(void *start, size_t size)
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
 				     pgprot_t prot, struct page **ret_page,
-				     const void *caller, bool want_vaddr)
+				     const void *caller, bool want_vaddr,
+				     int coherent_flag)
 {
 	unsigned long order = get_order(size);
 	size_t count = size >> PAGE_SHIFT;
@@ -588,7 +606,7 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
 	if (!page)
 		return NULL;
 
-	__dma_clear_buffer(page, size);
+	__dma_clear_buffer(page, size, coherent_flag);
 
 	if (!want_vaddr)
 		goto out;
@@ -638,7 +656,7 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
 #define __get_dma_pgprot(attrs, prot)				__pgprot(0)
 #define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv)	NULL
 #define __alloc_from_pool(size, ret_page)			NULL
-#define __alloc_from_contiguous(dev, size, prot, ret, c, wv)	NULL
+#define __alloc_from_contiguous(dev, size, prot, ret, c, wv, coherent_flag)	NULL
 #define __free_from_pool(cpu_addr, size)			do { } while (0)
 #define __free_from_contiguous(dev, page, cpu_addr, size, wv)	do { } while (0)
 #define __dma_free_remap(cpu_addr, size)			do { } while (0)
@@ -649,7 +667,8 @@ static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
 				   struct page **ret_page)
 {
 	struct page *page;
-	page = __dma_alloc_buffer(dev, size, gfp);
+	/* __alloc_simple_buffer is only called when the device is coherent */
+	page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
 	if (!page)
 		return NULL;
 
@@ -679,7 +698,7 @@ static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
 {
 	return __alloc_from_contiguous(args->dev, args->size, args->prot,
 				       ret_page, args->caller,
-				       args->want_vaddr);
+				       args->want_vaddr, args->coherent_flag);
 }
 
 static void cma_allocator_free(struct arm_dma_free_args *args)
@@ -746,6 +765,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 		.prot = prot,
 		.caller = caller,
 		.want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs),
+		.coherent_flag = is_coherent ? COHERENT : NORMAL,
 	};
 
 #ifdef CONFIG_DMA_API_DEBUG
@@ -1253,7 +1273,8 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
 static const int iommu_order_array[] = { 9, 8, 4, 0 };
 
 static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
-					  gfp_t gfp, struct dma_attrs *attrs)
+					  gfp_t gfp, struct dma_attrs *attrs,
+					  int coherent_flag)
 {
 	struct page **pages;
 	int count = size >> PAGE_SHIFT;
@@ -1277,7 +1298,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
 		if (!page)
 			goto error;
 
-		__dma_clear_buffer(page, size);
+		__dma_clear_buffer(page, size, coherent_flag);
 
 		for (i = 0; i < count; i++)
 			pages[i] = page + i;
@@ -1327,7 +1348,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
 				pages[i + j] = pages[i] + j;
 		}
 
-		__dma_clear_buffer(pages[i], PAGE_SIZE << order);
+		__dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
 		i += 1 << order;
 		count -= 1 << order;
 	}
@@ -1505,7 +1526,8 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	 */
 	gfp &= ~(__GFP_COMP);
 
-	pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
+	/* For now always consider we are in a non-coherent case */
+	pages = __iommu_alloc_buffer(dev, size, gfp, attrs, NORMAL);
 	if (!pages)
 		return NULL;
 