@@ -42,7 +42,7 @@ static int order_to_index(unsigned int order)
 	return -1;
 }
 
-static unsigned int order_to_size(int order)
+static inline unsigned int order_to_size(int order)
 {
 	return PAGE_SIZE << order;
 }
@@ -79,8 +79,6 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,
 		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
 						DMA_BIDIRECTIONAL);
 	}
-	if (!page)
-		return NULL;
 
 	return page;
 }
@@ -126,7 +124,6 @@ static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
 
 		info->page = page;
 		info->order = orders[i];
-		INIT_LIST_HEAD(&info->list);
 		return info;
 	}
 	kfree(info);
@@ -144,7 +141,6 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 							heap);
 	struct sg_table *table;
 	struct scatterlist *sg;
-	int ret;
 	struct list_head pages;
 	struct page_info *info, *tmp_info;
 	int i = 0;
@@ -162,24 +158,23 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 		info = alloc_largest_available(sys_heap, buffer, size_remaining,
 						max_order);
 		if (!info)
-			goto err;
+			goto free_pages;
 		list_add_tail(&info->list, &pages);
-		size_remaining -= (1 << info->order) * PAGE_SIZE;
+		size_remaining -= PAGE_SIZE << info->order;
 		max_order = info->order;
 		i++;
 	}
 	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
 	if (!table)
-		goto err;
+		goto free_pages;
 
-	ret = sg_alloc_table(table, i, GFP_KERNEL);
-	if (ret)
-		goto err1;
+	if (sg_alloc_table(table, i, GFP_KERNEL))
+		goto free_table;
 
 	sg = table->sgl;
 	list_for_each_entry_safe(info, tmp_info, &pages, list) {
 		struct page *page = info->page;
-		sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE, 0);
+		sg_set_page(sg, page, PAGE_SIZE << info->order, 0);
 		sg = sg_next(sg);
 		list_del(&info->list);
 		kfree(info);
@@ -187,9 +182,10 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 
 	buffer->priv_virt = table;
 	return 0;
-err1:
+
+free_table:
 	kfree(table);
-err:
+free_pages:
 	list_for_each_entry_safe(info, tmp_info, &pages, list) {
 		free_buffer_page(sys_heap, buffer, info->page, info->order);
 		kfree(info);
@@ -199,14 +195,12 @@ err:
 
 static void ion_system_heap_free(struct ion_buffer *buffer)
 {
-	struct ion_heap *heap = buffer->heap;
-	struct ion_system_heap *sys_heap = container_of(heap,
+	struct ion_system_heap *sys_heap = container_of(buffer->heap,
 							struct ion_system_heap,
 							heap);
 	struct sg_table *table = buffer->sg_table;
 	bool cached = ion_buffer_cached(buffer);
 	struct scatterlist *sg;
-	LIST_HEAD(pages);
 	int i;
 
 	/* uncached pages come from the page pools, zero them before returning
@@ -276,10 +270,10 @@ static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
 
 		seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
 			   pool->high_count, pool->order,
-			   (1 << pool->order) * PAGE_SIZE * pool->high_count);
+			   (PAGE_SIZE << pool->order) * pool->high_count);
 		seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
 			   pool->low_count, pool->order,
-			   (1 << pool->order) * PAGE_SIZE * pool->low_count);
+			   (PAGE_SIZE << pool->order) * pool->low_count);
 	}
 	return 0;
 }
@@ -298,7 +292,7 @@ struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
 	heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
 			      GFP_KERNEL);
 	if (!heap->pools)
-		goto err_alloc_pools;
+		goto free_heap;
 	for (i = 0; i < num_orders; i++) {
 		struct ion_page_pool *pool;
 		gfp_t gfp_flags = low_order_gfp_flags;
@@ -307,18 +301,18 @@ struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
 			gfp_flags = high_order_gfp_flags;
 		pool = ion_page_pool_create(gfp_flags, orders[i]);
 		if (!pool)
-			goto err_create_pool;
+			goto destroy_pools;
 		heap->pools[i] = pool;
 	}
 
 	heap->heap.debug_show = ion_system_heap_debug_show;
 	return &heap->heap;
-err_create_pool:
-	for (i = 0; i < num_orders; i++)
-		if (heap->pools[i])
-			ion_page_pool_destroy(heap->pools[i]);
+
+destroy_pools:
+	while (i--)
+		ion_page_pool_destroy(heap->pools[i]);
 	kfree(heap->pools);
-err_alloc_pools:
+free_heap:
 	kfree(heap);
 	return ERR_PTR(-ENOMEM);
 }
@@ -364,12 +358,12 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap,
 	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
 	if (!table) {
 		ret = -ENOMEM;
-		goto out;
+		goto free_pages;
 	}
 
 	ret = sg_alloc_table(table, 1, GFP_KERNEL);
 	if (ret)
-		goto out;
+		goto free_table;
 
 	sg_set_page(table->sgl, page, len, 0);
 
@@ -379,10 +373,12 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap,
 
 	return 0;
 
-out:
+free_table:
+	kfree(table);
+free_pages:
 	for (i = 0; i < len >> PAGE_SHIFT; i++)
 		__free_page(page + i);
-	kfree(table);
+
 	return ret;
 }
 
@@ -448,4 +444,3 @@ void ion_system_contig_heap_destroy(struct ion_heap *heap)
 {
 	kfree(heap);
 }
-
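
The relabelled error paths in ion_system_heap_create() follow the usual kernel convention: each goto label is named after the cleanup it performs, and a partially built array is unwound with `while (i--)` so only the pools created so far are destroyed. As a hedged, standalone illustration of that pattern (plain userspace C, with hypothetical names such as make_pool_set; this is not the kernel code itself):

	/* Sketch of the "label = what it frees" + "while (i--)" unwind style. */
	#include <stdio.h>
	#include <stdlib.h>

	#define NUM_POOLS 3

	struct pool {
		int order;
	};

	static struct pool *make_pool(int order)
	{
		struct pool *p = malloc(sizeof(*p));

		if (p)
			p->order = order;
		return p;
	}

	static struct pool **make_pool_set(void)
	{
		struct pool **pools;
		int i;

		pools = calloc(NUM_POOLS, sizeof(*pools));
		if (!pools)
			return NULL;

		for (i = 0; i < NUM_POOLS; i++) {
			pools[i] = make_pool(i);
			if (!pools[i])
				goto destroy_pools;	/* label names the cleanup */
		}
		return pools;

	destroy_pools:
		while (i--)			/* frees only the pools already created */
			free(pools[i]);
		free(pools);
		return NULL;
	}

	int main(void)
	{
		struct pool **pools = make_pool_set();

		if (!pools)
			return 1;
		printf("created %d pools\n", NUM_POOLS);
		for (int i = 0; i < NUM_POOLS; i++)
			free(pools[i]);
		free(pools);
		return 0;
	}

Compared with the old `err_create_pool` loop, which rescanned the whole array and tested each slot for NULL, the `while (i--)` form needs no NULL checks because the loop index already records how far construction got.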