@@ -53,6 +53,11 @@ unsigned long cma_get_size(const struct cma *cma)
 	return cma->count << PAGE_SHIFT;
 }
 
+const char *cma_get_name(const struct cma *cma)
+{
+	return cma->name ? cma->name : "(undefined)";
+}
+
 static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     int align_order)
 {
@@ -168,6 +173,7 @@ core_initcall(cma_init_reserved_areas);
  */
 int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
+				 const char *name,
				 struct cma **res_cma)
 {
 	struct cma *cma;
@@ -198,6 +204,13 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 	 * subsystems (like slab allocator) are available.
 	 */
 	cma = &cma_areas[cma_area_count];
+	if (name) {
+		cma->name = name;
+	} else {
+		cma->name = kasprintf(GFP_KERNEL, "cma%d", cma_area_count);
+		if (!cma->name)
+			return -ENOMEM;
+	}
 	cma->base_pfn = PFN_DOWN(base);
 	cma->count = size >> PAGE_SHIFT;
 	cma->order_per_bit = order_per_bit;
@@ -229,7 +242,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
-			bool fixed, struct cma **res_cma)
+			bool fixed, const char *name, struct cma **res_cma)
 {
 	phys_addr_t memblock_end = memblock_end_of_DRAM();
 	phys_addr_t highmem_start;
@@ -335,7 +348,7 @@ int __init cma_declare_contiguous(phys_addr_t base,
 		base = addr;
 	}
 
-	ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
+	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
 	if (ret)
 		goto err;
 
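
For orientation, a caller-side sketch of the updated interfaces follows. It is illustrative only: example_reserve_cma(), example_area, and the 16 MiB size are made up, and it assumes the declarations of cma_declare_contiguous() and cma_get_name() are visible to the caller (the header changes are not part of these hunks).

/* Illustrative only -- hypothetical early-boot caller, e.g. arch setup code. */
#include <linux/cma.h>
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/sizes.h>

static struct cma *example_area;	/* hypothetical CMA area handle */

void __init example_reserve_cma(void)
{
	/*
	 * 16 MiB anywhere below the default limit, default alignment,
	 * order_per_bit = 0, not a fixed placement, named "example".
	 * Passing name == NULL would make cma_init_reserved_mem()
	 * generate a "cmaN" fallback name instead.
	 */
	if (cma_declare_contiguous(0, SZ_16M, 0, 0, 0, false,
				   "example", &example_area))
		return;

	pr_info("reserved CMA area '%s'\n", cma_get_name(example_area));
}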