|
@@ -26,6 +26,61 @@
|
|
|
|
|
|
#include "internal.h"
|
|
|
|
|
|
+/**
|
|
|
+ * DOC: memblock overview
|
|
|
+ *
|
|
|
+ * Memblock is a method of managing memory regions during the early
|
|
|
+ * boot period when the usual kernel memory allocators are not up and
|
|
|
+ * running.
|
|
|
+ *
|
|
|
+ * Memblock views the system memory as collections of contiguous
|
|
|
+ * regions. There are several types of these collections:
|
|
|
+ *
|
|
|
+ * * ``memory`` - describes the physical memory available to the
|
|
|
+ * kernel; this may differ from the actual physical memory installed
|
|
|
+ * in the system, for instance when the memory is restricted with
|
|
|
+ * ``mem=`` command line parameter
|
|
|
+ * * ``reserved`` - describes the regions that were allocated
|
|
|
+ * * ``physmap`` - describes the actual physical memory regardless of
|
|
|
+ * the possible restrictions; the ``physmap`` type is only available
|
|
|
+ * on some architectures.
|
|
|
+ *
|
|
|
+ * Each region is represented by :c:type:`struct memblock_region` that
|
|
|
+ * defines the region extents, its attributes and NUMA node id on NUMA
|
|
|
+ * systems. Every memory type is described by the :c:type:`struct
|
|
|
+ * memblock_type` which contains an array of memory regions along with
|
|
|
+ * the allocator metadata. The memory types are nicely wrapped with
|
|
|
+ * :c:type:`struct memblock`. This structure is statically initialized
|
|
|
+ * at build time. The region arrays for the "memory" and "reserved"
|
|
|
+ * types are initially sized to %INIT_MEMBLOCK_REGIONS and for the
|
|
|
+ * "physmap" type to %INIT_PHYSMEM_REGIONS.
|
|
|
+ * The :c:func:`memblock_allow_resize` enables automatic resizing of
|
|
|
+ * the region arrays during addition of new regions. This feature
|
|
|
+ * should be used with care so that memory allocated for the region
|
|
|
+ * array will not overlap with areas that should be reserved, for
|
|
|
+ * example initrd.
|
|
|
+ *
|
|
|
+ * The early architecture setup should tell memblock what the physical
|
|
|
+ * memory layout is by using :c:func:`memblock_add` or
|
|
|
+ * :c:func:`memblock_add_node` functions. The first function does not
|
|
|
+ * assign the region to a NUMA node and it is appropriate for UMA
|
|
|
+ * systems. Yet, it is possible to use it on NUMA systems as well and
|
|
|
+ * assign the region to a NUMA node later in the setup process using
|
|
|
+ * :c:func:`memblock_set_node`. The :c:func:`memblock_add_node`
|
|
|
+ * performs such an assignment directly.
|
|
|
+ *
|
|
|
+ * Once memblock is setup the memory can be allocated using either
|
|
|
+ * memblock or bootmem APIs.
|
|
|
+ *
|
|
|
+ * As the system boot progresses, the architecture specific
|
|
|
+ * :c:func:`mem_init` function frees all the memory to the buddy page
|
|
|
+ * allocator.
|
|
|
+ *
|
|
|
+ * If an architecture enables %CONFIG_ARCH_DISCARD_MEMBLOCK, the
|
|
|
+ * memblock data structures will be discarded after the system
|
|
|
+ * initialization completes.
|
|
|
+ */
|
|
|
+
|
|
|
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
|
|
|
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
|
|
|
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
|
|
@@ -60,7 +115,7 @@ static int memblock_can_resize __initdata_memblock;
|
|
|
static int memblock_memory_in_slab __initdata_memblock = 0;
|
|
|
static int memblock_reserved_in_slab __initdata_memblock = 0;
|
|
|
|
|
|
-ulong __init_memblock choose_memblock_flags(void)
|
|
|
+enum memblock_flags __init_memblock choose_memblock_flags(void)
|
|
|
{
|
|
|
return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
|
|
|
}
|
|
@@ -92,10 +147,11 @@ bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
|
|
|
return i < type->cnt;
|
|
|
}
|
|
|
|
|
|
-/*
|
|
|
+/**
|
|
|
* __memblock_find_range_bottom_up - find free area utility in bottom-up
|
|
|
* @start: start of candidate range
|
|
|
- * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
|
|
|
+ * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
|
|
|
+ * %MEMBLOCK_ALLOC_ACCESSIBLE
|
|
|
* @size: size of free area to find
|
|
|
* @align: alignment of free area to find
|
|
|
* @nid: nid of the free area to find, %NUMA_NO_NODE for any node
|
|
@@ -103,13 +159,13 @@ bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
|
|
|
*
|
|
|
* Utility called from memblock_find_in_range_node(), find free area bottom-up.
|
|
|
*
|
|
|
- * RETURNS:
|
|
|
+ * Return:
|
|
|
* Found address on success, 0 on failure.
|
|
|
*/
|
|
|
static phys_addr_t __init_memblock
|
|
|
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
|
|
|
phys_addr_t size, phys_addr_t align, int nid,
|
|
|
- ulong flags)
|
|
|
+ enum memblock_flags flags)
|
|
|
{
|
|
|
phys_addr_t this_start, this_end, cand;
|
|
|
u64 i;
|
|
@@ -129,7 +185,8 @@ __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
|
|
|
/**
|
|
|
* __memblock_find_range_top_down - find free area utility, in top-down
|
|
|
* @start: start of candidate range
|
|
|
- * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
|
|
|
+ * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
|
|
|
+ * %MEMBLOCK_ALLOC_ACCESSIBLE
|
|
|
* @size: size of free area to find
|
|
|
* @align: alignment of free area to find
|
|
|
* @nid: nid of the free area to find, %NUMA_NO_NODE for any node
|
|
@@ -137,13 +194,13 @@ __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
|
|
|
*
|
|
|
* Utility called from memblock_find_in_range_node(), find free area top-down.
|
|
|
*
|
|
|
- * RETURNS:
|
|
|
+ * Return:
|
|
|
* Found address on success, 0 on failure.
|
|
|
*/
|
|
|
static phys_addr_t __init_memblock
|
|
|
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
|
|
|
phys_addr_t size, phys_addr_t align, int nid,
|
|
|
- ulong flags)
|
|
|
+ enum memblock_flags flags)
|
|
|
{
|
|
|
phys_addr_t this_start, this_end, cand;
|
|
|
u64 i;
|
|
@@ -169,7 +226,8 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
|
|
|
* @size: size of free area to find
|
|
|
* @align: alignment of free area to find
|
|
|
* @start: start of candidate range
|
|
|
- * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
|
|
|
+ * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
|
|
|
+ * %MEMBLOCK_ALLOC_ACCESSIBLE
|
|
|
* @nid: nid of the free area to find, %NUMA_NO_NODE for any node
|
|
|
* @flags: pick from blocks based on memory attributes
|
|
|
*
|
|
@@ -183,12 +241,13 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
|
|
|
*
|
|
|
* If bottom-up allocation failed, will try to allocate memory top-down.
|
|
|
*
|
|
|
- * RETURNS:
|
|
|
+ * Return:
|
|
|
* Found address on success, 0 on failure.
|
|
|
*/
|
|
|
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
|
|
|
phys_addr_t align, phys_addr_t start,
|
|
|
- phys_addr_t end, int nid, ulong flags)
|
|
|
+ phys_addr_t end, int nid,
|
|
|
+ enum memblock_flags flags)
|
|
|
{
|
|
|
phys_addr_t kernel_end, ret;
|
|
|
|
|
@@ -237,13 +296,14 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
|
|
|
/**
|
|
|
* memblock_find_in_range - find free area in given range
|
|
|
* @start: start of candidate range
|
|
|
- * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
|
|
|
+ * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
|
|
|
+ * %MEMBLOCK_ALLOC_ACCESSIBLE
|
|
|
* @size: size of free area to find
|
|
|
* @align: alignment of free area to find
|
|
|
*
|
|
|
* Find @size free area aligned to @align in the specified range.
|
|
|
*
|
|
|
- * RETURNS:
|
|
|
+ * Return:
|
|
|
* Found address on success, 0 on failure.
|
|
|
*/
|
|
|
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
|
|
@@ -251,7 +311,7 @@ phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
|
|
|
phys_addr_t align)
|
|
|
{
|
|
|
phys_addr_t ret;
|
|
|
- ulong flags = choose_memblock_flags();
|
|
|
+ enum memblock_flags flags = choose_memblock_flags();
|
|
|
|
|
|
again:
|
|
|
ret = memblock_find_in_range_node(size, align, start, end,
|
|
@@ -287,7 +347,7 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u
|
|
|
|
|
|
#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
|
|
|
/**
|
|
|
- * Discard memory and reserved arrays if they were allocated
|
|
|
+ * memblock_discard - discard memory and reserved arrays if they were allocated
|
|
|
*/
|
|
|
void __init memblock_discard(void)
|
|
|
{
|
|
@@ -317,11 +377,11 @@ void __init memblock_discard(void)
|
|
|
*
|
|
|
* Double the size of the @type regions array. If memblock is being used to
|
|
|
* allocate memory for a new reserved regions array and there is a previously
|
|
|
- * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
|
|
|
+ * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
|
|
|
* waiting to be reserved, ensure the memory used by the new array does
|
|
|
* not overlap.
|
|
|
*
|
|
|
- * RETURNS:
|
|
|
+ * Return:
|
|
|
* 0 on success, -1 on failure.
|
|
|
*/
|
|
|
static int __init_memblock memblock_double_array(struct memblock_type *type,
|
|
@@ -466,13 +526,14 @@ static void __init_memblock memblock_merge_regions(struct memblock_type *type)
|
|
|
* @nid: node id of the new region
|
|
|
* @flags: flags of the new region
|
|
|
*
|
|
|
- * Insert new memblock region [@base,@base+@size) into @type at @idx.
|
|
|
+ * Insert new memblock region [@base, @base + @size) into @type at @idx.
|
|
|
* @type must already have extra room to accommodate the new region.
|
|
|
*/
|
|
|
static void __init_memblock memblock_insert_region(struct memblock_type *type,
|
|
|
int idx, phys_addr_t base,
|
|
|
phys_addr_t size,
|
|
|
- int nid, unsigned long flags)
|
|
|
+ int nid,
|
|
|
+ enum memblock_flags flags)
|
|
|
{
|
|
|
struct memblock_region *rgn = &type->regions[idx];
|
|
|
|
|
@@ -494,17 +555,17 @@ static void __init_memblock memblock_insert_region(struct memblock_type *type,
|
|
|
* @nid: nid of the new region
|
|
|
* @flags: flags of the new region
|
|
|
*
|
|
|
- * Add new memblock region [@base,@base+@size) into @type. The new region
|
|
|
+ * Add new memblock region [@base, @base + @size) into @type. The new region
|
|
|
* is allowed to overlap with existing ones - overlaps don't affect already
|
|
|
* existing regions. @type is guaranteed to be minimal (all neighbouring
|
|
|
* compatible regions are merged) after the addition.
|
|
|
*
|
|
|
- * RETURNS:
|
|
|
+ * Return:
|
|
|
* 0 on success, -errno on failure.
|
|
|
*/
|
|
|
int __init_memblock memblock_add_range(struct memblock_type *type,
|
|
|
phys_addr_t base, phys_addr_t size,
|
|
|
- int nid, unsigned long flags)
|
|
|
+ int nid, enum memblock_flags flags)
|
|
|
{
|
|
|
bool insert = false;
|
|
|
phys_addr_t obase = base;
|
|
@@ -588,12 +649,35 @@ repeat:
|
|
|
}
|
|
|
}
|
|
|
|
|
|
+/**
|
|
|
+ * memblock_add_node - add new memblock region within a NUMA node
|
|
|
+ * @base: base address of the new region
|
|
|
+ * @size: size of the new region
|
|
|
+ * @nid: nid of the new region
|
|
|
+ *
|
|
|
+ * Add new memblock region [@base, @base + @size) to the "memory"
|
|
|
+ * type. See memblock_add_range() description for more details
|
|
|
+ *
|
|
|
+ * Return:
|
|
|
+ * 0 on success, -errno on failure.
|
|
|
+ */
|
|
|
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
|
|
|
int nid)
|
|
|
{
|
|
|
return memblock_add_range(&memblock.memory, base, size, nid, 0);
|
|
|
}
|
|
|
|
|
|
+/**
|
|
|
+ * memblock_add - add new memblock region
|
|
|
+ * @base: base address of the new region
|
|
|
+ * @size: size of the new region
|
|
|
+ *
|
|
|
+ * Add new memblock region [@base, @base + @size) to the "memory"
|
|
|
+ * type. See memblock_add_range() description for more details
|
|
|
+ *
|
|
|
+ * Return:
|
|
|
+ * 0 on success, -errno on failure.
|
|
|
+ */
|
|
|
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
|
|
|
{
|
|
|
phys_addr_t end = base + size - 1;
|
|
@@ -613,11 +697,11 @@ int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
|
|
|
* @end_rgn: out parameter for the end of isolated region
|
|
|
*
|
|
|
* Walk @type and ensure that regions don't cross the boundaries defined by
|
|
|
- * [@base,@base+@size). Crossing regions are split at the boundaries,
|
|
|
+ * [@base, @base + @size). Crossing regions are split at the boundaries,
|
|
|
* which may create at most two more regions. The index of the first
|
|
|
* region inside the range is returned in *@start_rgn and end in *@end_rgn.
|
|
|
*
|
|
|
- * RETURNS:
|
|
|
+ * Return:
|
|
|
* 0 on success, -errno on failure.
|
|
|
*/
|
|
|
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
|
|
@@ -728,10 +812,15 @@ int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
+ * memblock_setclr_flag - set or clear flag for a memory region
|
|
|
+ * @base: base address of the region
|
|
|
+ * @size: size of the region
|
|
|
+ * @set: set or clear the flag
|
|
|
+ * @flag: the flag to update
|
|
|
*
|
|
|
* This function isolates region [@base, @base + @size), and sets/clears flag
|
|
|
*
|
|
|
- * Return 0 on success, -errno on failure.
|
|
|
+ * Return: 0 on success, -errno on failure.
|
|
|
*/
|
|
|
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
|
|
|
phys_addr_t size, int set, int flag)
|
|
@@ -758,7 +847,7 @@ static int __init_memblock memblock_setclr_flag(phys_addr_t base,
|
|
|
* @base: the base phys addr of the region
|
|
|
* @size: the size of the region
|
|
|
*
|
|
|
- * Return 0 on success, -errno on failure.
|
|
|
+ * Return: 0 on success, -errno on failure.
|
|
|
*/
|
|
|
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
|
|
|
{
|
|
@@ -770,7 +859,7 @@ int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
|
|
|
* @base: the base phys addr of the region
|
|
|
* @size: the size of the region
|
|
|
*
|
|
|
- * Return 0 on success, -errno on failure.
|
|
|
+ * Return: 0 on success, -errno on failure.
|
|
|
*/
|
|
|
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
|
|
|
{
|
|
@@ -782,7 +871,7 @@ int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
|
|
|
* @base: the base phys addr of the region
|
|
|
* @size: the size of the region
|
|
|
*
|
|
|
- * Return 0 on success, -errno on failure.
|
|
|
+ * Return: 0 on success, -errno on failure.
|
|
|
*/
|
|
|
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
|
|
|
{
|
|
@@ -796,7 +885,7 @@ int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
|
|
|
* @base: the base phys addr of the region
|
|
|
* @size: the size of the region
|
|
|
*
|
|
|
- * Return 0 on success, -errno on failure.
|
|
|
+ * Return: 0 on success, -errno on failure.
|
|
|
*/
|
|
|
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
|
|
|
{
|
|
@@ -808,7 +897,7 @@ int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
|
|
|
* @base: the base phys addr of the region
|
|
|
* @size: the size of the region
|
|
|
*
|
|
|
- * Return 0 on success, -errno on failure.
|
|
|
+ * Return: 0 on success, -errno on failure.
|
|
|
*/
|
|
|
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
|
|
|
{
|
|
@@ -873,7 +962,8 @@ void __init_memblock __next_reserved_mem_region(u64 *idx,
|
|
|
* As both region arrays are sorted, the function advances the two indices
|
|
|
* in lockstep and returns each intersection.
|
|
|
*/
|
|
|
-void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
|
|
|
+void __init_memblock __next_mem_range(u64 *idx, int nid,
|
|
|
+ enum memblock_flags flags,
|
|
|
struct memblock_type *type_a,
|
|
|
struct memblock_type *type_b,
|
|
|
phys_addr_t *out_start,
|
|
@@ -968,9 +1058,6 @@ void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
|
|
|
/**
|
|
|
* __next_mem_range_rev - generic next function for for_each_*_range_rev()
|
|
|
*
|
|
|
- * Finds the next range from type_a which is not marked as unsuitable
|
|
|
- * in type_b.
|
|
|
- *
|
|
|
* @idx: pointer to u64 loop variable
|
|
|
* @nid: node selector, %NUMA_NO_NODE for all nodes
|
|
|
* @flags: pick from blocks based on memory attributes
|
|
@@ -980,9 +1067,13 @@ void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
|
|
|
* @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
|
|
|
* @out_nid: ptr to int for nid of the range, can be %NULL
|
|
|
*
|
|
|
+ * Finds the next range from type_a which is not marked as unsuitable
|
|
|
+ * in type_b.
|
|
|
+ *
|
|
|
* Reverse of __next_mem_range().
|
|
|
*/
|
|
|
-void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
|
|
|
+void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
|
|
|
+ enum memblock_flags flags,
|
|
|
struct memblock_type *type_a,
|
|
|
struct memblock_type *type_b,
|
|
|
phys_addr_t *out_start,
|
|
@@ -1114,10 +1205,10 @@ void __init_memblock __next_mem_pfn_range(int *idx, int nid,
|
|
|
* @type: memblock type to set node ID for
|
|
|
* @nid: node ID to set
|
|
|
*
|
|
|
- * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
|
|
|
+ * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
|
|
|
* Regions which cross the area boundaries are split as necessary.
|
|
|
*
|
|
|
- * RETURNS:
|
|
|
+ * Return:
|
|
|
* 0 on success, -errno on failure.
|
|
|
*/
|
|
|
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
|
|
@@ -1140,7 +1231,8 @@ int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
|
|
|
|
|
|
static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
|
|
|
phys_addr_t align, phys_addr_t start,
|
|
|
- phys_addr_t end, int nid, ulong flags)
|
|
|
+ phys_addr_t end, int nid,
|
|
|
+ enum memblock_flags flags)
|
|
|
{
|
|
|
phys_addr_t found;
|
|
|
|
|
@@ -1162,7 +1254,7 @@ static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
|
|
|
|
|
|
phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
|
|
|
phys_addr_t start, phys_addr_t end,
|
|
|
- ulong flags)
|
|
|
+ enum memblock_flags flags)
|
|
|
{
|
|
|
return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
|
|
|
flags);
|
|
@@ -1170,14 +1262,14 @@ phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
|
|
|
|
|
|
phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
|
|
|
phys_addr_t align, phys_addr_t max_addr,
|
|
|
- int nid, ulong flags)
|
|
|
+ int nid, enum memblock_flags flags)
|
|
|
{
|
|
|
return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
|
|
|
}
|
|
|
|
|
|
phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
|
|
|
{
|
|
|
- ulong flags = choose_memblock_flags();
|
|
|
+ enum memblock_flags flags = choose_memblock_flags();
|
|
|
phys_addr_t ret;
|
|
|
|
|
|
again:
|
|
@@ -1240,7 +1332,7 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i
|
|
|
* The allocation is performed from memory region limited by
|
|
|
* memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
|
|
|
*
|
|
|
- * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
|
|
|
+ * The memory block is aligned on %SMP_CACHE_BYTES if @align == 0.
|
|
|
*
|
|
|
* The phys address of allocated boot memory block is converted to virtual and
|
|
|
* allocated memory is reset to 0.
|
|
@@ -1248,7 +1340,7 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i
|
|
|
* In addition, function sets the min_count to 0 using kmemleak_alloc for
|
|
|
* allocated boot memory block, so that it is never reported as leaks.
|
|
|
*
|
|
|
- * RETURNS:
|
|
|
+ * Return:
|
|
|
* Virtual address of allocated memory block on success, NULL on failure.
|
|
|
*/
|
|
|
static void * __init memblock_virt_alloc_internal(
|
|
@@ -1258,7 +1350,7 @@ static void * __init memblock_virt_alloc_internal(
|
|
|
{
|
|
|
phys_addr_t alloc;
|
|
|
void *ptr;
|
|
|
- ulong flags = choose_memblock_flags();
|
|
|
+ enum memblock_flags flags = choose_memblock_flags();
|
|
|
|
|
|
if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
|
|
|
nid = NUMA_NO_NODE;
|
|
@@ -1333,7 +1425,7 @@ done:
|
|
|
* info), if enabled. Does not zero allocated memory, does not panic if request
|
|
|
* cannot be satisfied.
|
|
|
*
|
|
|
- * RETURNS:
|
|
|
+ * Return:
|
|
|
* Virtual address of allocated memory block on success, NULL on failure.
|
|
|
*/
|
|
|
void * __init memblock_virt_alloc_try_nid_raw(
|
|
@@ -1370,7 +1462,7 @@ void * __init memblock_virt_alloc_try_nid_raw(
|
|
|
* Public function, provides additional debug information (including caller
|
|
|
* info), if enabled. This function zeroes the allocated memory.
|
|
|
*
|
|
|
- * RETURNS:
|
|
|
+ * Return:
|
|
|
* Virtual address of allocated memory block on success, NULL on failure.
|
|
|
*/
|
|
|
void * __init memblock_virt_alloc_try_nid_nopanic(
|
|
@@ -1406,7 +1498,7 @@ void * __init memblock_virt_alloc_try_nid_nopanic(
|
|
|
* which provides debug information (including caller info), if enabled,
|
|
|
* and panics if the request can not be satisfied.
|
|
|
*
|
|
|
- * RETURNS:
|
|
|
+ * Return:
|
|
|
* Virtual address of allocated memory block on success, NULL on failure.
|
|
|
*/
|
|
|
void * __init memblock_virt_alloc_try_nid(
|
|
@@ -1449,9 +1541,9 @@ void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
|
|
|
memblock_remove_range(&memblock.reserved, base, size);
|
|
|
}
|
|
|
|
|
|
-/*
|
|
|
+/**
|
|
|
* __memblock_free_late - free bootmem block pages directly to buddy allocator
|
|
|
- * @addr: phys starting address of the boot memory block
|
|
|
+ * @base: phys starting address of the boot memory block
|
|
|
* @size: size of the boot memory block in bytes
|
|
|
*
|
|
|
* This is only useful when the bootmem allocator has already been torn
|
|
@@ -1663,9 +1755,9 @@ int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
|
|
|
* @base: base of region to check
|
|
|
* @size: size of region to check
|
|
|
*
|
|
|
- * Check if the region [@base, @base+@size) is a subset of a memory block.
|
|
|
+ * Check if the region [@base, @base + @size) is a subset of a memory block.
|
|
|
*
|
|
|
- * RETURNS:
|
|
|
+ * Return:
|
|
|
* 0 if false, non-zero if true
|
|
|
*/
|
|
|
bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
|
|
@@ -1684,9 +1776,10 @@ bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t siz
|
|
|
* @base: base of region to check
|
|
|
* @size: size of region to check
|
|
|
*
|
|
|
- * Check if the region [@base, @base+@size) intersects a reserved memory block.
|
|
|
+ * Check if the region [@base, @base + @size) intersects a reserved
|
|
|
+ * memory block.
|
|
|
*
|
|
|
- * RETURNS:
|
|
|
+ * Return:
|
|
|
* True if they intersect, false if not.
|
|
|
*/
|
|
|
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
|
|
@@ -1733,7 +1826,7 @@ phys_addr_t __init_memblock memblock_get_current_limit(void)
|
|
|
static void __init_memblock memblock_dump(struct memblock_type *type)
|
|
|
{
|
|
|
phys_addr_t base, end, size;
|
|
|
- unsigned long flags;
|
|
|
+ enum memblock_flags flags;
|
|
|
int idx;
|
|
|
struct memblock_region *rgn;
|
|
|
|
|
@@ -1751,7 +1844,7 @@ static void __init_memblock memblock_dump(struct memblock_type *type)
|
|
|
snprintf(nid_buf, sizeof(nid_buf), " on node %d",
|
|
|
memblock_get_region_node(rgn));
|
|
|
#endif
|
|
|
- pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#lx\n",
|
|
|
+ pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
|
|
|
type->name, idx, &base, &end, &size, nid_buf, flags);
|
|
|
}
|
|
|
}
|