@@ -472,7 +472,7 @@ static void __init_memblock memblock_insert_region(struct memblock_type *type,
 }
 
 /**
- * memblock_add_region - add new memblock region
+ * memblock_add_range - add new memblock region
  * @type: memblock type to add new region into
  * @base: base address of the new region
  * @size: size of the new region
@@ -487,7 +487,7 @@ static void __init_memblock memblock_insert_region(struct memblock_type *type,
  * RETURNS:
  * 0 on success, -errno on failure.
  */
-static int __init_memblock memblock_add_region(struct memblock_type *type,
+int __init_memblock memblock_add_range(struct memblock_type *type,
 				phys_addr_t base, phys_addr_t size,
 				int nid, unsigned long flags)
 {
@@ -569,12 +569,12 @@ repeat:
 int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
 				       int nid)
 {
-	return memblock_add_region(&memblock.memory, base, size, nid, 0);
+	return memblock_add_range(&memblock.memory, base, size, nid, 0);
 }
 
 int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
 {
-	return memblock_add_region(&memblock.memory, base, size,
+	return memblock_add_range(&memblock.memory, base, size,
 				   MAX_NUMNODES, 0);
 }
 
@@ -654,8 +654,8 @@ static int __init_memblock memblock_isolate_range(struct memblock_type *type,
 	return 0;
 }
 
-static int __init_memblock __memblock_remove(struct memblock_type *type,
-					     phys_addr_t base, phys_addr_t size)
+int __init_memblock memblock_remove_range(struct memblock_type *type,
+					  phys_addr_t base, phys_addr_t size)
 {
 	int start_rgn, end_rgn;
 	int i, ret;
@@ -671,9 +671,10 @@ static int __init_memblock __memblock_remove(struct memblock_type *type,
 
 int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
 {
-	return __memblock_remove(&memblock.memory, base, size);
+	return memblock_remove_range(&memblock.memory, base, size);
 }
 
+
 int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
 {
 	memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
@@ -681,7 +682,7 @@ int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
 		     (unsigned long long)base + size - 1,
 		     (void *)_RET_IP_);
 
-	return __memblock_remove(&memblock.reserved, base, size);
+	return memblock_remove_range(&memblock.reserved, base, size);
 }
 
 static int __init_memblock memblock_reserve_region(phys_addr_t base,
@@ -696,7 +697,7 @@ static int __init_memblock memblock_reserve_region(phys_addr_t base,
 		     (unsigned long long)base + size - 1,
 		     flags, (void *)_RET_IP_);
 
-	return memblock_add_region(_rgn, base, size, nid, flags);
+	return memblock_add_range(_rgn, base, size, nid, flags);
 }
 
 int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
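
For illustration only (not part of the patch; the init function and the
addresses in it are hypothetical): after the rename, memblock_remove() and
memblock_free() are thin wrappers that differ only in which region array they
hand to memblock_remove_range(), .memory for remove and .reserved for free,
while memblock_reserve() feeds memblock_add_range().

#include <linux/memblock.h>
#include <linux/sizes.h>

static int __init carve_out_example(void)
{
	phys_addr_t base = 0x80000000;	/* hypothetical address */
	phys_addr_t size = SZ_16M;

	memblock_reserve(base, size);	/* mark [base, base+size) used (.reserved) */
	memblock_free(base, size);	/* release it again (.reserved) */
	return memblock_remove(base, size); /* stop tracking it as RAM (.memory) */
}
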
@@ -758,17 +759,19 @@ int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
 }
 
 /**
- * __next_free_mem_range - next function for for_each_free_mem_range()
+ * __next_mem_range - next function for for_each_free_mem_range() etc.
  * @idx: pointer to u64 loop variable
  * @nid: node selector, %NUMA_NO_NODE for all nodes
+ * @type_a: pointer to memblock_type from where the range is taken
+ * @type_b: pointer to memblock_type which excludes memory from being taken
  * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
  * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
  * @out_nid: ptr to int for nid of the range, can be %NULL
  *
- * Find the first free area from *@idx which matches @nid, fill the out
+ * Find the first area from *@idx which matches @nid, fill the out
  * parameters, and update *@idx for the next iteration.  The lower 32bit of
- * *@idx contains index into memory region and the upper 32bit indexes the
- * areas before each reserved region.  For example, if reserved regions
+ * *@idx contains index into type_a and the upper 32bit indexes the
+ * areas before each region in type_b.  For example, if type_b regions
  * look like the following,
 *
 * 0:[0-16), 1:[32-48), 2:[128-130)
@@ -780,53 +783,77 @@ int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
  * As both region arrays are sorted, the function advances the two indices
  * in lockstep and returns each intersection.
  */
-void __init_memblock __next_free_mem_range(u64 *idx, int nid,
-					   phys_addr_t *out_start,
-					   phys_addr_t *out_end, int *out_nid)
+void __init_memblock __next_mem_range(u64 *idx, int nid,
+				      struct memblock_type *type_a,
+				      struct memblock_type *type_b,
+				      phys_addr_t *out_start,
+				      phys_addr_t *out_end, int *out_nid)
 {
-	struct memblock_type *mem = &memblock.memory;
-	struct memblock_type *rsv = &memblock.reserved;
-	int mi = *idx & 0xffffffff;
-	int ri = *idx >> 32;
+	int idx_a = *idx & 0xffffffff;
+	int idx_b = *idx >> 32;
 
-	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
+	if (WARN_ONCE(nid == MAX_NUMNODES,
+	"Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
 		nid = NUMA_NO_NODE;
 
-	for ( ; mi < mem->cnt; mi++) {
-		struct memblock_region *m = &mem->regions[mi];
+	for (; idx_a < type_a->cnt; idx_a++) {
+		struct memblock_region *m = &type_a->regions[idx_a];
+
 		phys_addr_t m_start = m->base;
 		phys_addr_t m_end = m->base + m->size;
+		int m_nid = memblock_get_region_node(m);
 
 		/* only memory regions are associated with nodes, check it */
-		if (nid != NUMA_NO_NODE && nid != memblock_get_region_node(m))
+		if (nid != NUMA_NO_NODE && nid != m_nid)
 			continue;
 
-		/* scan areas before each reservation for intersection */
-		for ( ; ri < rsv->cnt + 1; ri++) {
-			struct memblock_region *r = &rsv->regions[ri];
-			phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
-			phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;
+		if (!type_b) {
+			if (out_start)
+				*out_start = m_start;
+			if (out_end)
+				*out_end = m_end;
+			if (out_nid)
+				*out_nid = m_nid;
+			idx_a++;
+			*idx = (u32)idx_a | (u64)idx_b << 32;
+			return;
+		}
+
+		/* scan areas before each reservation */
+		for (; idx_b < type_b->cnt + 1; idx_b++) {
+			struct memblock_region *r;
+			phys_addr_t r_start;
+			phys_addr_t r_end;
+
+			r = &type_b->regions[idx_b];
+			r_start = idx_b ? r[-1].base + r[-1].size : 0;
+			r_end = idx_b < type_b->cnt ?
+				r->base : ULLONG_MAX;
 
-			/* if ri advanced past mi, break out to advance mi */
+			/*
+			 * if idx_b advanced past idx_a,
+			 * break out to advance idx_a
+			 */
 			if (r_start >= m_end)
 				break;
 			/* if the two regions intersect, we're done */
 			if (m_start < r_end) {
 				if (out_start)
-					*out_start = max(m_start, r_start);
+					*out_start =
+						max(m_start, r_start);
 				if (out_end)
 					*out_end = min(m_end, r_end);
 				if (out_nid)
-					*out_nid = memblock_get_region_node(m);
+					*out_nid = m_nid;
 				/*
-				 * The region which ends first is advanced
-				 * for the next iteration.
+				 * The region which ends first is
+				 * advanced for the next iteration.
 				 */
 				if (m_end <= r_end)
-					mi++;
+					idx_a++;
 				else
-					ri++;
-				*idx = (u32)mi | (u64)ri << 32;
+					idx_b++;
+				*idx = (u32)idx_a | (u64)idx_b << 32;
 				return;
 			}
 		}
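
To make the packed-index walk concrete, here is a freestanding userspace
sketch (illustration only, not part of the patch). It reuses the example
type_b regions from the comment above, 0:[0-16), 1:[32-48), 2:[128-130),
against a single hypothetical type_a region [0-130), and prints the two free
gaps [16-32) and [48-128):

#include <stdio.h>
#include <stdint.h>

struct range { uint64_t base, size; };

/* the type_b example from the comment: 0:[0-16), 1:[32-48), 2:[128-130) */
static const struct range reserved[] = { {0, 16}, {32, 16}, {128, 2} };
/* one hypothetical type_a region covering [0-130) */
static const struct range memory[] = { {0, 130} };

#define CNT(a) (sizeof(a) / sizeof((a)[0]))

/* returns 1 and fills [*start, *end) on success, 0 when exhausted */
static int next_free(uint64_t *idx, uint64_t *start, uint64_t *end)
{
	uint32_t a = *idx & 0xffffffff;	/* low 32 bits: index into memory[] */
	uint32_t b = *idx >> 32;	/* high 32 bits: gap before reserved[b] */

	for (; a < CNT(memory); a++) {
		uint64_t m_start = memory[a].base;
		uint64_t m_end = m_start + memory[a].size;

		for (; b < CNT(reserved) + 1; b++) {
			/* gap b: from end of reserved[b-1] to start of reserved[b] */
			uint64_t r_start = b ? reserved[b - 1].base +
					       reserved[b - 1].size : 0;
			uint64_t r_end = b < CNT(reserved) ?
					 reserved[b].base : UINT64_MAX;

			if (r_start >= m_end)
				break;	/* gap lies past this memory region */
			if (m_start < r_end) {
				*start = m_start > r_start ? m_start : r_start;
				*end = m_end < r_end ? m_end : r_end;
				/* whichever range ends first is advanced */
				if (m_end <= r_end)
					a++;
				else
					b++;
				*idx = (uint32_t)a | (uint64_t)b << 32;
				return 1;
			}
		}
	}
	*idx = UINT64_MAX;	/* signal end of iteration */
	return 0;
}

int main(void)
{
	uint64_t idx = 0, s, e;

	while (next_free(&idx, &s, &e))	/* prints [16-32) and [48-128) */
		printf("[%llu-%llu)\n", (unsigned long long)s,
		       (unsigned long long)e);
	return 0;
}
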
@@ -837,57 +864,80 @@ void __init_memblock __next_free_mem_range(u64 *idx, int nid,
 }
 
 /**
- * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse()
+ * __next_mem_range_rev - generic next function for for_each_*_range_rev()
+ *
+ * Finds the next range from type_a which is not marked as unsuitable
+ * in type_b.
+ *
  * @idx: pointer to u64 loop variable
  * @nid: nid: node selector, %NUMA_NO_NODE for all nodes
+ * @type_a: pointer to memblock_type from where the range is taken
+ * @type_b: pointer to memblock_type which excludes memory from being taken
  * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
  * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
  * @out_nid: ptr to int for nid of the range, can be %NULL
  *
- * Reverse of __next_free_mem_range().
- *
- * Linux kernel cannot migrate pages used by itself. Memory hotplug users won't
- * be able to hot-remove hotpluggable memory used by the kernel. So this
- * function skip hotpluggable regions if needed when allocating memory for the
- * kernel.
+ * Reverse of __next_mem_range().
 */
-void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid,
-					   phys_addr_t *out_start,
-					   phys_addr_t *out_end, int *out_nid)
+void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
+					  struct memblock_type *type_a,
+					  struct memblock_type *type_b,
+					  phys_addr_t *out_start,
+					  phys_addr_t *out_end, int *out_nid)
 {
-	struct memblock_type *mem = &memblock.memory;
-	struct memblock_type *rsv = &memblock.reserved;
-	int mi = *idx & 0xffffffff;
-	int ri = *idx >> 32;
+	int idx_a = *idx & 0xffffffff;
+	int idx_b = *idx >> 32;
 
 	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
 		nid = NUMA_NO_NODE;
 
 	if (*idx == (u64)ULLONG_MAX) {
-		mi = mem->cnt - 1;
-		ri = rsv->cnt;
+		idx_a = type_a->cnt - 1;
+		idx_b = type_b->cnt;
 	}
 
-	for ( ; mi >= 0; mi--) {
-		struct memblock_region *m = &mem->regions[mi];
+	for (; idx_a >= 0; idx_a--) {
+		struct memblock_region *m = &type_a->regions[idx_a];
+
 		phys_addr_t m_start = m->base;
 		phys_addr_t m_end = m->base + m->size;
+		int m_nid = memblock_get_region_node(m);
 
 		/* only memory regions are associated with nodes, check it */
-		if (nid != NUMA_NO_NODE && nid != memblock_get_region_node(m))
+		if (nid != NUMA_NO_NODE && nid != m_nid)
 			continue;
 
 		/* skip hotpluggable memory regions if needed */
 		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
 			continue;
 
-		/* scan areas before each reservation for intersection */
-		for ( ; ri >= 0; ri--) {
-			struct memblock_region *r = &rsv->regions[ri];
-			phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
-			phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;
+		if (!type_b) {
+			if (out_start)
+				*out_start = m_start;
+			if (out_end)
+				*out_end = m_end;
+			if (out_nid)
+				*out_nid = m_nid;
+			idx_a--;
+			*idx = (u32)idx_a | (u64)idx_b << 32;
+			return;
+		}
+
+		/* scan areas before each reservation */
+		for (; idx_b >= 0; idx_b--) {
+			struct memblock_region *r;
+			phys_addr_t r_start;
+			phys_addr_t r_end;
+
+			r = &type_b->regions[idx_b];
+			r_start = idx_b ? r[-1].base + r[-1].size : 0;
+			r_end = idx_b < type_b->cnt ?
+				r->base : ULLONG_MAX;
+			/*
+			 * if idx_b advanced past idx_a,
+			 * break out to advance idx_a
+			 */
 
-			/* if ri advanced past mi, break out to advance mi */
 			if (r_end <= m_start)
 				break;
 			/* if the two regions intersect, we're done */
@@ -897,18 +947,17 @@ void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid,
 			if (out_end)
 				*out_end = min(m_end, r_end);
 			if (out_nid)
-				*out_nid = memblock_get_region_node(m);
-
+				*out_nid = m_nid;
 			if (m_start >= r_start)
-				mi--;
+				idx_a--;
 			else
-				ri--;
-			*idx = (u32)mi | (u64)ri << 32;
+				idx_b--;
+			*idx = (u32)idx_a | (u64)idx_b << 32;
 			return;
 		}
 	}
-
+	/* signal end of iteration */
 	*idx = ULLONG_MAX;
 }
 
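
These two functions are driven from the for_each_* macros in
include/linux/memblock.h. As a rough sketch of the matching header change
(paraphrased here; the exact header hunk is not part of this excerpt), the
free-range walkers pass memblock.memory as type_a and memblock.reserved as
type_b, and the reverse variant seeds the cursor with ULLONG_MAX so that
__next_mem_range_rev() starts from the last region:

#define for_each_free_mem_range(i, nid, p_start, p_end, p_nid)		\
	for (i = 0,							\
	     __next_mem_range(&i, nid, &memblock.memory,		\
			      &memblock.reserved, p_start,		\
			      p_end, p_nid);				\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, &memblock.memory,		\
			      &memblock.reserved, p_start,		\
			      p_end, p_nid))

#define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid)	\
	for (i = (u64)ULLONG_MAX,					\
	     __next_mem_range_rev(&i, nid, &memblock.memory,		\
				  &memblock.reserved,			\
				  p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, &memblock.memory,		\
				  &memblock.reserved,			\
				  p_start, p_end, p_nid))

Passing a NULL type_b instead makes the same iterators visit every region of
type_a, which is what the new !type_b shortcut enables.
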
@@ -1201,7 +1250,7 @@ void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
 		     __func__, (u64)base, (u64)base + size - 1,
 		     (void *)_RET_IP_);
 	kmemleak_free_part(__va(base), size);
-	__memblock_remove(&memblock.reserved, base, size);
+	memblock_remove_range(&memblock.reserved, base, size);
 }
 
 /*
@@ -1287,8 +1336,10 @@ void __init memblock_enforce_memory_limit(phys_addr_t limit)
 	}
 
 	/* truncate both memory and reserved regions */
-	__memblock_remove(&memblock.memory, max_addr, (phys_addr_t)ULLONG_MAX);
-	__memblock_remove(&memblock.reserved, max_addr, (phys_addr_t)ULLONG_MAX);
+	memblock_remove_range(&memblock.memory, max_addr,
+			      (phys_addr_t)ULLONG_MAX);
+	memblock_remove_range(&memblock.reserved, max_addr,
+			      (phys_addr_t)ULLONG_MAX);
 }
 
 static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
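
Note the idiom in the last hunk: a size of (phys_addr_t)ULLONG_MAX starting at
max_addr means "everything from max_addr up", so the two calls truncate both
region arrays at the limit. A hypothetical caller-side equivalent, for
illustration only (the 4 GiB limit is made up):

	/* hypothetical: forget all physical memory above 4 GiB */
	phys_addr_t limit = 0x100000000ULL;

	memblock_remove_range(&memblock.memory, limit, (phys_addr_t)ULLONG_MAX);
	memblock_remove_range(&memblock.reserved, limit, (phys_addr_t)ULLONG_MAX);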