@@ -142,6 +142,54 @@ static int __init cma_init_reserved_areas(void)
 }
 core_initcall(cma_init_reserved_areas);
 
+/**
+ * cma_init_reserved_mem() - create custom contiguous area from reserved memory
+ * @base: Base address of the reserved area
+ * @size: Size of the reserved area (in bytes).
+ * @order_per_bit: Order of pages represented by one bit in the bitmap.
+ * @res_cma: Pointer to store the created cma region.
+ *
+ * This function creates a custom contiguous area from already reserved memory.
+ */
+int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
+				 int order_per_bit, struct cma **res_cma)
+{
+	struct cma *cma;
+	phys_addr_t alignment;
+
+	/* Sanity checks */
+	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
+		pr_err("Not enough slots for CMA reserved regions!\n");
+		return -ENOSPC;
+	}
+
+	if (!size || !memblock_is_region_reserved(base, size))
+		return -EINVAL;
+
+	/* ensure minimal alignment required by mm core */
+	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
+
+	/* alignment should be aligned with order_per_bit */
+	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
+		return -EINVAL;
+
+	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
+		return -EINVAL;
+
+	/*
+	 * Each reserved area must be initialised later, when more kernel
+	 * subsystems (like slab allocator) are available.
+	 */
+	cma = &cma_areas[cma_area_count];
+	cma->base_pfn = PFN_DOWN(base);
+	cma->count = size >> PAGE_SHIFT;
+	cma->order_per_bit = order_per_bit;
+	*res_cma = cma;
+	cma_area_count++;
+
+	return 0;
+}
+
 /**
  * cma_declare_contiguous() - reserve custom contiguous area
  * @base: Base address of the reserved area optional, use 0 for any
@@ -165,7 +213,6 @@ int __init cma_declare_contiguous(phys_addr_t base,
 			phys_addr_t alignment, unsigned int order_per_bit,
 			bool fixed, struct cma **res_cma)
 {
-	struct cma *cma;
 	phys_addr_t memblock_end = memblock_end_of_DRAM();
 	phys_addr_t highmem_start = __pa(high_memory);
 	int ret = 0;
@@ -237,16 +284,9 @@ int __init cma_declare_contiguous(phys_addr_t base,
 		}
 	}
 
-	/*
-	 * Each reserved area must be initialised later, when more kernel
-	 * subsystems (like slab allocator) are available.
-	 */
-	cma = &cma_areas[cma_area_count];
-	cma->base_pfn = PFN_DOWN(base);
-	cma->count = size >> PAGE_SHIFT;
-	cma->order_per_bit = order_per_bit;
-	*res_cma = cma;
-	cma_area_count++;
+	ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
+	if (ret)
+		goto err;
 
 	pr_info("Reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
 		(unsigned long)base);
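
For reference (not part of the diff above): a minimal sketch of how an early-boot caller might register an already memblock-reserved range through the new helper. The prototype matches the function added above; the header location, the function and variable names, and the order_per_bit choice are illustrative assumptions, not taken from this series.

#include <linux/cma.h>		/* assumed home of the cma_init_reserved_mem() declaration */
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/printk.h>

static struct cma *example_cma;	/* hypothetical region handle */

static int __init example_cma_register(phys_addr_t base, phys_addr_t size)
{
	int ret;

	/*
	 * The range must already be reserved in memblock (e.g. via
	 * memblock_reserve()) and base/size must satisfy the alignment
	 * checked by cma_init_reserved_mem() above.
	 */
	if (!memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* order_per_bit == 0: one bitmap bit tracks one page. */
	ret = cma_init_reserved_mem(base, size, 0, &example_cma);
	if (ret)
		pr_err("example: cma_init_reserved_mem() failed: %d\n", ret);

	return ret;
}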