@@ -231,75 +231,34 @@ static void ion_system_heap_unmap_dma(struct ion_heap *heap,
 	return;
 }
 
-static struct ion_heap_ops system_heap_ops = {
-	.allocate = ion_system_heap_allocate,
-	.free = ion_system_heap_free,
-	.map_dma = ion_system_heap_map_dma,
-	.unmap_dma = ion_system_heap_unmap_dma,
-	.map_kernel = ion_heap_map_kernel,
-	.unmap_kernel = ion_heap_unmap_kernel,
-	.map_user = ion_heap_map_user,
-};
-
-static unsigned long ion_system_heap_shrink_count(struct shrinker *shrinker,
-						  struct shrink_control *sc)
+static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
+				  int nr_to_scan)
 {
-	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
-					     shrinker);
-	struct ion_system_heap *sys_heap = container_of(heap,
-							struct ion_system_heap,
-							heap);
+	struct ion_system_heap *sys_heap;
 	int nr_total = 0;
 	int i;
 
-	/* total number of items is whatever the page pools are holding
-	   plus whatever's in the freelist */
-	for (i = 0; i < num_orders; i++) {
-		struct ion_page_pool *pool = sys_heap->pools[i];
-		nr_total += ion_page_pool_shrink(pool, sc->gfp_mask, 0);
-	}
-	nr_total += ion_heap_freelist_size(heap) / PAGE_SIZE;
-	return nr_total;
-
-}
-
-static unsigned long ion_system_heap_shrink_scan(struct shrinker *shrinker,
-						 struct shrink_control *sc)
-{
-
-	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
-					     shrinker);
-	struct ion_system_heap *sys_heap = container_of(heap,
-							struct ion_system_heap,
-							heap);
-	int nr_freed = 0;
-	int i;
-
-	if (sc->nr_to_scan == 0)
-		goto end;
-
-	/* shrink the free list first, no point in zeroing the memory if
-	   we're just going to reclaim it */
-	nr_freed += ion_heap_freelist_drain(heap, sc->nr_to_scan * PAGE_SIZE) /
-		PAGE_SIZE;
-
-	if (nr_freed >= sc->nr_to_scan)
-		goto end;
+	sys_heap = container_of(heap, struct ion_system_heap, heap);
 
 	for (i = 0; i < num_orders; i++) {
 		struct ion_page_pool *pool = sys_heap->pools[i];
-
-		nr_freed += ion_page_pool_shrink(pool, sc->gfp_mask,
-						 sc->nr_to_scan);
-		if (nr_freed >= sc->nr_to_scan)
-			break;
+		nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
 	}
 
-end:
-	return nr_freed;
-
+	return nr_total;
 }
 
+static struct ion_heap_ops system_heap_ops = {
+	.allocate = ion_system_heap_allocate,
+	.free = ion_system_heap_free,
+	.map_dma = ion_system_heap_map_dma,
+	.unmap_dma = ion_system_heap_unmap_dma,
+	.map_kernel = ion_heap_map_kernel,
+	.unmap_kernel = ion_heap_unmap_kernel,
+	.map_user = ion_heap_map_user,
+	.shrink = ion_system_heap_shrink,
+};
+
 static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
 				      void *unused)
 {
@@ -347,11 +306,6 @@ struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
 		heap->pools[i] = pool;
 	}
 
-	heap->heap.shrinker.scan_objects = ion_system_heap_shrink_scan;
-	heap->heap.shrinker.count_objects = ion_system_heap_shrink_count;
-	heap->heap.shrinker.seeks = DEFAULT_SEEKS;
-	heap->heap.shrinker.batch = 0;
-	register_shrinker(&heap->heap.shrinker);
 	heap->heap.debug_show = ion_system_heap_debug_show;
 	return &heap->heap;
 err_create_pool: