@@ -5018,9 +5018,33 @@ static void __init find_zone_movable_pfns_for_nodes(void)
 	nodemask_t saved_node_state = node_states[N_MEMORY];
 	unsigned long totalpages = early_calculate_totalpages();
 	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
+	struct memblock_type *type = &memblock.memory;
+
+	/* Need to find movable_zone earlier when movable_node is specified. */
+	find_usable_zone_for_movable();
+
+	/*
+	 * If movable_node is specified, ignore kernelcore and movablecore
+	 * options.
+	 */
+	if (movable_node_is_enabled()) {
+		for (i = 0; i < type->cnt; i++) {
+			if (!memblock_is_hotpluggable(&type->regions[i]))
+				continue;
+
+			nid = type->regions[i].nid;
+
+			usable_startpfn = PFN_DOWN(type->regions[i].base);
+			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
+				min(usable_startpfn, zone_movable_pfn[nid]) :
+				usable_startpfn;
+		}
+
+		goto out2;
+	}
 
 	/*
-	 * If movablecore was specified, calculate what size of
+	 * If movablecore=nn[KMG] was specified, calculate what size of
 	 * kernelcore that corresponds so that memory usable for
 	 * any allocation type is evenly spread. If both kernelcore
 	 * and movablecore are specified, then the value of kernelcore
@@ -5046,7 +5070,6 @@ static void __init find_zone_movable_pfns_for_nodes(void)
 		goto out;
 
 	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
-	find_usable_zone_for_movable();
 	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
 
 restart:
@@ -5137,6 +5160,7 @@ restart:
 	if (usable_nodes && required_kernelcore > usable_nodes)
 		goto restart;
 
+out2:
 	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
 	for (nid = 0; nid < MAX_NUMNODES; nid++)
 		zone_movable_pfn[nid] =