@@ -773,31 +773,6 @@ static void node_states_set_node(int node, struct memory_notify *arg)
 	node_set_state(node, N_MEMORY);
 }
 
-bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages, int online_type)
-{
-	struct pglist_data *pgdat = NODE_DATA(nid);
-	struct zone *movable_zone = &pgdat->node_zones[ZONE_MOVABLE];
-	struct zone *default_zone = default_zone_for_pfn(nid, pfn, nr_pages);
-
-	/*
-	 * TODO there shouldn't be any inherent reason to have ZONE_NORMAL
-	 * physically before ZONE_MOVABLE. All we need is they do not
-	 * overlap. Historically we didn't allow ZONE_NORMAL after ZONE_MOVABLE
-	 * though so let's stick with it for simplicity for now.
-	 * TODO make sure we do not overlap with ZONE_DEVICE
-	 */
-	if (online_type == MMOP_ONLINE_KERNEL) {
-		if (zone_is_empty(movable_zone))
-			return true;
-		return movable_zone->zone_start_pfn >= pfn + nr_pages;
-	} else if (online_type == MMOP_ONLINE_MOVABLE) {
-		return zone_end_pfn(default_zone) <= pfn;
-	}
-
-	/* MMOP_ONLINE_KEEP will always succeed and inherits the current zone */
-	return online_type == MMOP_ONLINE_KEEP;
-}
-
 static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn,
 		unsigned long nr_pages)
 {
@@ -856,7 +831,7 @@ void __ref move_pfn_range_to_zone(struct zone *zone,
  * If no kernel zone covers this pfn range it will automatically go
  * to the ZONE_NORMAL.
  */
-struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
+static struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
 		unsigned long nr_pages)
 {
 	struct pglist_data *pgdat = NODE_DATA(nid);
@@ -872,6 +847,31 @@ struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
 		return &pgdat->node_zones[ZONE_NORMAL];
 }
 
+bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages, int online_type)
+{
+	struct pglist_data *pgdat = NODE_DATA(nid);
+	struct zone *movable_zone = &pgdat->node_zones[ZONE_MOVABLE];
+	struct zone *default_zone = default_zone_for_pfn(nid, pfn, nr_pages);
+
+	/*
+	 * TODO there shouldn't be any inherent reason to have ZONE_NORMAL
+	 * physically before ZONE_MOVABLE. All we need is they do not
+	 * overlap. Historically we didn't allow ZONE_NORMAL after ZONE_MOVABLE
+	 * though so let's stick with it for simplicity for now.
+	 * TODO make sure we do not overlap with ZONE_DEVICE
+	 */
+	if (online_type == MMOP_ONLINE_KERNEL) {
+		if (zone_is_empty(movable_zone))
+			return true;
+		return movable_zone->zone_start_pfn >= pfn + nr_pages;
+	} else if (online_type == MMOP_ONLINE_MOVABLE) {
+		return zone_end_pfn(default_zone) <= pfn;
+	}
+
+	/* MMOP_ONLINE_KEEP will always succeed and inherits the current zone */
+	return online_type == MMOP_ONLINE_KEEP;
+}
+
 static inline bool movable_pfn_range(int nid, struct zone *default_zone,
 		unsigned long start_pfn, unsigned long nr_pages)
 {
@@ -885,12 +885,8 @@ static inline bool movable_pfn_range(int nid, struct zone *default_zone,
 	return !zone_intersects(default_zone, start_pfn, nr_pages);
 }
 
-/*
- * Associates the given pfn range with the given node and the zone appropriate
- * for the given online type.
- */
-static struct zone * __meminit move_pfn_range(int online_type, int nid,
-		unsigned long start_pfn, unsigned long nr_pages)
+struct zone *zone_for_pfn_range(int online_type, int nid, unsigned long start_pfn,
+		unsigned long nr_pages)
 {
 	struct pglist_data *pgdat = NODE_DATA(nid);
 	struct zone *zone = default_zone_for_pfn(nid, start_pfn, nr_pages);
@@ -909,6 +905,19 @@ static struct zone * __meminit move_pfn_range(int online_type, int nid,
 		zone = &pgdat->node_zones[ZONE_MOVABLE];
 	}
 
+	return zone;
+}
+
+/*
+ * Associates the given pfn range with the given node and the zone appropriate
+ * for the given online type.
+ */
+static struct zone * __meminit move_pfn_range(int online_type, int nid,
+		unsigned long start_pfn, unsigned long nr_pages)
+{
+	struct zone *zone;
+
+	zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
 	move_pfn_range_to_zone(zone, start_pfn, nr_pages);
 	return zone;
 }
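
For context, here is a minimal sketch (not part of the patch) of how a caller outside mm/memory_hotplug.c might combine the now callable zone_for_pfn_range() with allow_online_pfn_range() once this change is applied. Only those two functions and the MMOP_ONLINE_* online types come from the diff above; the helper name show_zone_for_block, its sysfs-style buffer interface and the printing are assumptions made purely for illustration.

#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/memory_hotplug.h>

/* Hypothetical helper: report which zone a pfn range would land in
 * for each online type that is actually allowed for it. */
static ssize_t show_zone_for_block(int nid, unsigned long start_pfn,
				   unsigned long nr_pages, char *buf)
{
	static const int types[] = {
		MMOP_ONLINE_KERNEL,
		MMOP_ONLINE_MOVABLE,
		MMOP_ONLINE_KEEP,
	};
	ssize_t len = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(types); i++) {
		struct zone *zone;

		/* Skip online types this range cannot be onlined with. */
		if (!allow_online_pfn_range(nid, start_pfn, nr_pages, types[i]))
			continue;

		/* Ask which zone the range would be associated with. */
		zone = zone_for_pfn_range(types[i], nid, start_pfn, nr_pages);
		len += sprintf(buf + len, "%s ", zone->name);
	}
	len += sprintf(buf + len, "\n");

	return len;
}

The point of the split in the last two hunks is visible here: zone_for_pfn_range() only answers the "which zone would this be?" question, while move_pfn_range() keeps the side effect of actually moving the range into that zone.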