@@ -292,6 +292,26 @@ int page_group_by_mobility_disabled __read_mostly;
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
 static inline void reset_deferred_meminit(pg_data_t *pgdat)
 {
+	unsigned long max_initialise;
+	unsigned long reserved_lowmem;
+
+	/*
+	 * Initialise at least 2G of a node but also take into account that
+	 * two large system hashes that can take up 1GB for 0.25TB/node.
+	 */
+	max_initialise = max(2UL << (30 - PAGE_SHIFT),
+		(pgdat->node_spanned_pages >> 8));
+
+	/*
+	 * Compensate the all the memblock reservations (e.g. crash kernel)
+	 * from the initial estimation to make sure we will initialize enough
+	 * memory to boot.
+	 */
+	reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn,
+			pgdat->node_start_pfn + max_initialise);
+	max_initialise += reserved_lowmem;
+
+	pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages);
 	pgdat->first_deferred_pfn = ULONG_MAX;
 }

@@ -314,20 +334,11 @@ static inline bool update_defer_init(pg_data_t *pgdat,
 				unsigned long pfn, unsigned long zone_end,
 				unsigned long *nr_initialised)
 {
-	unsigned long max_initialise;
-
 	/* Always populate low zones for address-contrained allocations */
 	if (zone_end < pgdat_end_pfn(pgdat))
 		return true;
-	/*
-	 * Initialise at least 2G of a node but also take into account that
-	 * two large system hashes that can take up 1GB for 0.25TB/node.
-	 */
-	max_initialise = max(2UL << (30 - PAGE_SHIFT),
-		(pgdat->node_spanned_pages >> 8));
-
 	(*nr_initialised)++;
-	if ((*nr_initialised > max_initialise) &&
+	if ((*nr_initialised > pgdat->static_init_size) &&
 	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
 		pgdat->first_deferred_pfn = pfn;
 		return false;
@@ -3870,7 +3881,9 @@ retry:
 		goto got_pg;

 	/* Avoid allocations with no watermarks from looping endlessly */
-	if (test_thread_flag(TIF_MEMDIE))
+	if (test_thread_flag(TIF_MEMDIE) &&
+	    (alloc_flags == ALLOC_NO_WATERMARKS ||
+	     (gfp_mask & __GFP_NOMEMALLOC)))
 		goto nopage;

 	/* Retry as long as the OOM killer is making progress */
@@ -6136,7 +6149,6 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
 	/* pg_data_t should be reset to zero when it's allocated */
 	WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx);

-	reset_deferred_meminit(pgdat);
 	pgdat->node_id = nid;
 	pgdat->node_start_pfn = node_start_pfn;
 	pgdat->per_cpu_nodestats = NULL;
@@ -6158,6 +6170,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
 		(unsigned long)pgdat->node_mem_map);
 #endif

+	reset_deferred_meminit(pgdat);
 	free_area_init_core(pgdat);
 }

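For reference, the sizing that the first hunk adds to reset_deferred_meminit() can be checked with a standalone userspace sketch, not kernel code. PAGE_SHIFT == 12 (4 KiB pages) and the 0.25 TiB node size are assumptions chosen to match the comment in the patch, max_ul() stands in for the kernel's max() macro, and the memblock compensation is left out because it depends on boot-time reservations.

/*
 * Sketch of the static_init_size arithmetic added to
 * reset_deferred_meminit(). Assumes PAGE_SHIFT == 12 (4 KiB pages)
 * and a hypothetical 0.25 TiB node; not kernel code.
 */
#include <stdio.h>

#define PAGE_SHIFT	12UL			/* assumed: 4 KiB pages */

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;			/* stand-in for the kernel max() */
}

int main(void)
{
	/* hypothetical 0.25 TiB (256 GiB) node, expressed in pages */
	unsigned long node_spanned_pages = 256UL << (30 - PAGE_SHIFT);

	/* floor of 2 GiB worth of pages ... */
	unsigned long two_gig = 2UL << (30 - PAGE_SHIFT);
	/* ... or 1/256th of the node, whichever is larger */
	unsigned long scaled = node_spanned_pages >> 8;

	unsigned long max_initialise = max_ul(two_gig, scaled);

	printf("2 GiB floor   : %lu pages (%lu MiB)\n",
	       two_gig, two_gig >> (20 - PAGE_SHIFT));
	printf("node / 256    : %lu pages (%lu MiB)\n",
	       scaled, scaled >> (20 - PAGE_SHIFT));
	printf("max_initialise: %lu pages (%lu MiB)\n",
	       max_initialise, max_initialise >> (20 - PAGE_SHIFT));
	return 0;
}

For the 0.25 TiB node mentioned in the comment, the 1/256 scaling yields 1 GiB, so the 2 GiB floor wins; reset_deferred_meminit() then adds the memblock reservations within that range and clamps the result to node_spanned_pages.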
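The update_defer_init() hunk only swaps the per-call max_initialise computation for the precomputed pgdat->static_init_size, but the other half of the test is what decides where deferral actually starts: first_deferred_pfn is only set at a memory-section boundary. A minimal sketch of that gate follows; PAGES_PER_SECTION == 32768 (128 MiB sections with 4 KiB pages, as on x86_64) is an assumption, and the real value is architecture dependent.

/*
 * Sketch of the section-boundary gate from update_defer_init():
 * deferral can only begin at a pfn that starts a memory section.
 * PAGES_PER_SECTION == 32768 is an assumed (x86_64-like) value.
 */
#include <stdio.h>
#include <stdbool.h>

#define PAGES_PER_SECTION	32768UL		/* assumed value */

static bool section_aligned(unsigned long pfn)
{
	return (pfn & (PAGES_PER_SECTION - 1)) == 0;
}

int main(void)
{
	unsigned long pfns[] = { 0, 1, 32768, 40000, 65536, 524288 };
	size_t i;

	for (i = 0; i < sizeof(pfns) / sizeof(pfns[0]); i++)
		printf("pfn %7lu: %s\n", pfns[i],
		       section_aligned(pfns[i]) ? "section boundary" : "mid-section");
	return 0;
}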
|
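The __alloc_pages_slowpath() hunk narrows the early bail-out for OOM victims: a TIF_MEMDIE task now only jumps to nopage if it is genuinely allocating with no watermarks, or if __GFP_NOMEMALLOC forbids using the reserves anyway; otherwise it keeps retrying. The sketch below mirrors only the boolean structure of the new test; the macro values are stand-ins, not the kernel's ALLOC_* or GFP_* definitions.

/*
 * Boolean structure of the reworked bail-out check from
 * __alloc_pages_slowpath(). The macros are stand-ins for the kernel's
 * ALLOC_NO_WATERMARKS and __GFP_NOMEMALLOC; their values are arbitrary
 * and only the logic mirrors the patch.
 */
#include <stdio.h>
#include <stdbool.h>

#define ALLOC_WMARK_MIN		0x0u		/* stand-in */
#define ALLOC_NO_WATERMARKS	0x4u		/* stand-in */
#define GFP_NOMEMALLOC		0x10000u	/* stand-in for __GFP_NOMEMALLOC */

static bool memdie_should_bail(unsigned int alloc_flags, unsigned int gfp_mask)
{
	/* the calling task is assumed to have TIF_MEMDIE set */
	return alloc_flags == ALLOC_NO_WATERMARKS ||
	       (gfp_mask & GFP_NOMEMALLOC);
}

int main(void)
{
	/* normal watermarks, reserves still available: keep retrying */
	printf("min watermarks -> bail=%d\n",
	       memdie_should_bail(ALLOC_WMARK_MIN, 0));
	/* already ignoring watermarks: nothing left to try, give up */
	printf("no watermarks  -> bail=%d\n",
	       memdie_should_bail(ALLOC_NO_WATERMARKS, 0));
	/* reserves explicitly forbidden: give up as well */
	printf("nomemalloc     -> bail=%d\n",
	       memdie_should_bail(ALLOC_WMARK_MIN, GFP_NOMEMALLOC));
	return 0;
}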