@@ -26,6 +26,9 @@
 struct ion_device *idev;
 struct ion_heap **heaps;
 
+void *carveout_ptr;
+void *chunk_ptr;
+
 struct ion_platform_heap dummy_heaps[] = {
 		{
 			.id = ION_HEAP_TYPE_SYSTEM,
@@ -37,10 +40,24 @@ struct ion_platform_heap dummy_heaps[] = {
 			.type = ION_HEAP_TYPE_SYSTEM_CONTIG,
 			.name = "system contig",
 		},
+		{
+			.id = ION_HEAP_TYPE_CARVEOUT,
+			.type = ION_HEAP_TYPE_CARVEOUT,
+			.name = "carveout",
+			.size = SZ_4M,
+		},
+		{
+			.id = ION_HEAP_TYPE_CHUNK,
+			.type = ION_HEAP_TYPE_CHUNK,
+			.name = "chunk",
+			.size = SZ_4M,
+			.align = SZ_16K,
+			.priv = (void *)(SZ_16K),
+		},
 };
 
 struct ion_platform_data dummy_ion_pdata = {
-	.nr = 2,
+	.nr = 4,
 	.heaps = dummy_heaps,
 };
 
@@ -54,9 +71,36 @@ static int __init ion_dummy_init(void)
 	if (!heaps)
 		return PTR_ERR(heaps);
 
+
+	/* Allocate a dummy carveout heap */
+	carveout_ptr = alloc_pages_exact(
+				dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size,
+				GFP_KERNEL);
+	if (carveout_ptr)
+		dummy_heaps[ION_HEAP_TYPE_CARVEOUT].base =
+						virt_to_phys(carveout_ptr);
+	else
+		pr_err("ion_dummy: Could not allocate carveout\n");
+
+	/* Allocate a dummy chunk heap */
+	chunk_ptr = alloc_pages_exact(
+				dummy_heaps[ION_HEAP_TYPE_CHUNK].size,
+				GFP_KERNEL);
+	if (chunk_ptr)
+		dummy_heaps[ION_HEAP_TYPE_CHUNK].base = virt_to_phys(chunk_ptr);
+	else
+		pr_err("ion_dummy: Could not allocate chunk\n");
+
 	for (i = 0; i < dummy_ion_pdata.nr; i++) {
 		struct ion_platform_heap *heap_data = &dummy_ion_pdata.heaps[i];
 
+		if (heap_data->type == ION_HEAP_TYPE_CARVEOUT &&
+							!heap_data->base)
+			continue;
+
+		if (heap_data->type == ION_HEAP_TYPE_CHUNK && !heap_data->base)
+			continue;
+
 		heaps[i] = ion_heap_create(heap_data);
 		if (IS_ERR_OR_NULL(heaps[i])) {
 			err = PTR_ERR(heaps[i]);
@@ -72,6 +116,16 @@ err:
 	}
 	kfree(heaps);
 
+	if (carveout_ptr) {
+		free_pages_exact(carveout_ptr,
+				dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
+		carveout_ptr = NULL;
+	}
+	if (chunk_ptr) {
+		free_pages_exact(chunk_ptr,
+				dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
+		chunk_ptr = NULL;
+	}
 	return err;
 }
 
@@ -85,6 +139,17 @@ static void __exit ion_dummy_exit(void)
 		ion_heap_destroy(heaps[i]);
 	kfree(heaps);
 
+	if (carveout_ptr) {
+		free_pages_exact(carveout_ptr,
+				dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
+		carveout_ptr = NULL;
+	}
+	if (chunk_ptr) {
+		free_pages_exact(chunk_ptr,
+				dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
+		chunk_ptr = NULL;
+	}
+
 	return;
 }
 
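Usage note (not part of the patch): once the carveout and chunk heaps are registered, an in-kernel client could allocate from them through the staging ION client API. The sketch below is illustrative only; it assumes the ion_client_create()/ion_alloc()/ion_free()/ion_client_destroy() signatures from the same kernel era as this driver, and the client name, helper name, and allocation size are hypothetical.

/*
 * Illustrative sketch only -- not part of the patch. Assumes the staging
 * ION in-kernel client API of this era; names and sizes are hypothetical.
 */
#include <linux/err.h>
#include <linux/sizes.h>
#include "ion.h"	/* drivers/staging/android/ion/ion.h */

static int ion_dummy_try_carveout(struct ion_device *dev)
{
	struct ion_client *client;
	struct ion_handle *handle;

	client = ion_client_create(dev, "ion-dummy-test");
	if (IS_ERR(client))
		return PTR_ERR(client);

	/*
	 * The dummy driver registers each heap with .id equal to its type,
	 * so a heap_id_mask of (1 << ION_HEAP_TYPE_CARVEOUT) selects the
	 * 4M carveout heap added by this patch.
	 */
	handle = ion_alloc(client, SZ_1M, 0,
			   1 << ION_HEAP_TYPE_CARVEOUT, 0);
	if (IS_ERR(handle)) {
		ion_client_destroy(client);
		return PTR_ERR(handle);
	}

	ion_free(client, handle);
	ion_client_destroy(client);
	return 0;
}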