@@ -103,6 +103,9 @@ nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
 };
 EXPORT_SYMBOL(node_states);
 
+/* Protect totalram_pages and zone->managed_pages */
+static DEFINE_SPINLOCK(managed_page_count_lock);
+
 unsigned long totalram_pages __read_mostly;
 unsigned long totalreserve_pages __read_mostly;
 /*
@@ -5206,6 +5209,14 @@ early_param("movablecore", cmdline_parse_movablecore);
 
 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
+void adjust_managed_page_count(struct page *page, long count)
+{
+	spin_lock(&managed_page_count_lock);
+	page_zone(page)->managed_pages += count;
+	totalram_pages += count;
+	spin_unlock(&managed_page_count_lock);
+}
+
 unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
 {
 	void *pos;