@@ -213,26 +213,9 @@ static int __init cma_init_reserved_areas(void)
 }
 core_initcall(cma_init_reserved_areas);
 
-/**
- * dma_contiguous_reserve_area() - reserve custom contiguous area
- * @size: Size of the reserved area (in bytes),
- * @base: Base address of the reserved area optional, use 0 for any
- * @limit: End address of the reserved memory (optional, 0 for any).
- * @res_cma: Pointer to store the created cma region.
- * @fixed: hint about where to place the reserved area
- *
- * This function reserves memory from early allocator. It should be
- * called by arch specific code once the early allocator (memblock or bootmem)
- * has been activated and all other subsystems have already allocated/reserved
- * memory. This function allows to create custom reserved areas for specific
- * devices.
- *
- * If @fixed is true, reserve contiguous area at exactly @base. If false,
- * reserve in range from @base to @limit.
- */
-int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
-				       phys_addr_t limit, struct cma **res_cma,
-				       bool fixed)
+static int __init __dma_contiguous_reserve_area(phys_addr_t size,
+				phys_addr_t base, phys_addr_t limit,
+				struct cma **res_cma, bool fixed)
 {
 	struct cma *cma = &cma_areas[cma_area_count];
 	phys_addr_t alignment;
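The hunk above only demotes the reservation core to a static helper; the exported entry point reappears in the next hunk as a thin wrapper. For orientation, a minimal sketch of how architecture code would call the public API during early boot (the `example_reserve` hook, the `dev_cma` variable, and the 16 MiB size are illustrative, not part of the patch):

```c
#include <linux/dma-contiguous.h>
#include <linux/printk.h>
#include <linux/sizes.h>

static struct cma *dev_cma;

static void __init example_reserve(void)
{
	/* base = 0 and limit = 0 let the early allocator pick the spot;
	 * fixed = false means any placement within that range. */
	if (dma_contiguous_reserve_area(SZ_16M, 0, 0, &dev_cma, false))
		pr_warn("example: CMA reservation failed\n");
}
```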
@@ -286,15 +269,47 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
 
 	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
 		(unsigned long)base);
-
-	/* Architecture specific contiguous memory fixup. */
-	dma_contiguous_early_fixup(base, size);
 	return 0;
+
 err:
 	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
 	return ret;
 }
 
+/**
+ * dma_contiguous_reserve_area() - reserve custom contiguous area
+ * @size: Size of the reserved area (in bytes).
+ * @base: Base address of the reserved area (optional, use 0 for any).
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ * @res_cma: Pointer to store the created cma region.
+ * @fixed: hint about where to place the reserved area
+ *
+ * This function reserves memory from the early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory. This function allows the creation of custom reserved areas for
+ * specific devices.
+ *
+ * If @fixed is true, reserve contiguous area at exactly @base. If false,
+ * reserve in range from @base to @limit.
+ */
+int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+				       phys_addr_t limit, struct cma **res_cma,
+				       bool fixed)
+{
+	int ret;
+
+	ret = __dma_contiguous_reserve_area(size, base, limit, res_cma, fixed);
+	if (ret)
+		return ret;
+
+	/* Architecture specific contiguous memory fixup. */
+	dma_contiguous_early_fixup(PFN_PHYS((*res_cma)->base_pfn),
+				   (*res_cma)->count << PAGE_SHIFT);
+
+	return 0;
+}
+
 static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
 {
 	mutex_lock(&cma->lock);
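One behavioral detail worth calling out in the wrapper: it cannot see the core helper's resolved local `base`, so the architecture fixup is fed the region re-derived from the returned `struct cma`. A sketch of that recovery, assuming it sits next to the `struct cma` definition in this file (PFN_PHYS() is the stock `linux/pfn.h` macro, `((phys_addr_t)(x) << PAGE_SHIFT)`; the helper names are hypothetical):

```c
/* Recover the region actually reserved, from the descriptor fields
 * filled in by __dma_contiguous_reserve_area(). */
static phys_addr_t cma_region_base(struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);		/* first byte of the area */
}

static phys_addr_t cma_region_size(struct cma *cma)
{
	return (phys_addr_t)cma->count << PAGE_SHIFT;	/* size in bytes */
}
```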
@@ -302,31 +317,16 @@ static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
 	mutex_unlock(&cma->lock);
 }
 
-/**
- * dma_alloc_from_contiguous() - allocate pages from contiguous area
- * @dev: Pointer to device for which the allocation is performed.
- * @count: Requested number of pages.
- * @align: Requested alignment of pages (in PAGE_SIZE order).
- *
- * This function allocates memory buffer for specified device. It uses
- * device specific contiguous memory area if available or the default
- * global one. Requires architecture specific dev_get_cma_area() helper
- * function.
- */
-struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+static struct page *__dma_alloc_from_contiguous(struct cma *cma, int count,
 				       unsigned int align)
 {
 	unsigned long mask, pfn, pageno, start = 0;
-	struct cma *cma = dev_get_cma_area(dev);
 	struct page *page = NULL;
 	int ret;
 
 	if (!cma || !cma->count)
 		return NULL;
 
-	if (align > CONFIG_CMA_ALIGNMENT)
-		align = CONFIG_CMA_ALIGNMENT;
-
 	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
 		 count, align);
 
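The core allocator now receives the cma area explicitly and assumes @align has already been clamped; the clamp moves into the wrapper added below. Since @align is a page order rather than a byte count, a short worked sketch (the helper names are hypothetical):

```c
#include <linux/mm.h>	/* PAGE_SIZE */

/* With 4 KiB pages, align = 8 (the default CONFIG_CMA_ALIGNMENT cap)
 * asks for a start pfn that is a multiple of 256, i.e. 1 MiB. */
static unsigned long cma_pfn_mask(unsigned int align)
{
	return (1UL << align) - 1;	/* mask applied to the start pfn */
}

static unsigned long cma_align_bytes(unsigned int align)
{
	return PAGE_SIZE << align;	/* equivalent byte alignment */
}
```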
@@ -375,19 +375,30 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
 }
 
 /**
- * dma_release_from_contiguous() - release allocated pages
- * @dev: Pointer to device for which the pages were allocated.
- * @pages: Allocated pages.
- * @count: Number of allocated pages.
+ * dma_alloc_from_contiguous() - allocate pages from contiguous area
+ * @dev: Pointer to device for which the allocation is performed.
+ * @count: Requested number of pages.
+ * @align: Requested alignment of pages (in PAGE_SIZE order).
  *
- * This function releases memory allocated by dma_alloc_from_contiguous().
- * It returns false when provided pages do not belong to contiguous area and
- * true otherwise.
+ * This function allocates a memory buffer for the specified device. It uses
+ * the device-specific contiguous memory area if available, or the default
+ * global one. Requires the architecture-specific dev_get_cma_area() helper
+ * function.
  */
-bool dma_release_from_contiguous(struct device *dev, struct page *pages,
-				 int count)
+struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+				       unsigned int align)
 {
 	struct cma *cma = dev_get_cma_area(dev);
+
+	if (align > CONFIG_CMA_ALIGNMENT)
+		align = CONFIG_CMA_ALIGNMENT;
+
+	return __dma_alloc_from_contiguous(cma, count, align);
+}
+
+static bool __dma_release_from_contiguous(struct cma *cma, struct page *pages,
+					  int count)
+{
 	unsigned long pfn;
 
 	if (!cma || !pages)
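Callers of the public allocator are unaffected by the split. A hedged usage sketch (the 1 MiB size, the 2^4-page alignment, and the buddy-allocator fallback are illustrative of what arch DMA-mapping code typically does, not something mandated by this file):

```c
#include <linux/dma-contiguous.h>
#include <linux/gfp.h>
#include <linux/sizes.h>

static struct page *example_alloc(struct device *dev)
{
	/* 256 pages = 1 MiB with 4 KiB pages, start aligned to 16 pages. */
	struct page *page = dma_alloc_from_contiguous(dev, 256, 4);

	if (!page)	/* no CMA area for this device, or it is full */
		page = alloc_pages(GFP_KERNEL, get_order(SZ_1M));
	return page;
}
```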
@@ -407,3 +418,21 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 
 	return true;
 }
+
+/**
+ * dma_release_from_contiguous() - release allocated pages
+ * @dev: Pointer to device for which the pages were allocated.
+ * @pages: Allocated pages.
+ * @count: Number of allocated pages.
+ *
+ * This function releases memory allocated by dma_alloc_from_contiguous().
+ * It returns false when the provided pages do not belong to the contiguous
+ * area, and true otherwise.
+ */
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+				 int count)
+{
+	struct cma *cma = dev_get_cma_area(dev);
+
+	return __dma_release_from_contiguous(cma, pages, count);
+}
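And the matching release path for the allocation sketch above: a false return means the pages were not CMA-backed and must be handed back to the page allocator instead (again an illustrative helper, not part of the patch):

```c
static void example_free(struct device *dev, struct page *page)
{
	if (!dma_release_from_contiguous(dev, page, 256))
		__free_pages(page, get_order(SZ_1M));
}
```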