@@ -970,7 +970,8 @@ static inline int check_new_page(struct page *page)
 	return 0;
 }
 
-static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
+static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
+								int alloc_flags)
 {
 	int i;
 
@@ -994,6 +995,14 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
 
 	set_page_owner(page, order, gfp_flags);
 
+	/*
+	 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to
+	 * allocate the page. The expectation is that the caller is taking
+	 * steps that will free more memory. The caller should avoid the page
+	 * being used for !PFMEMALLOC purposes.
+	 */
+	page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
+
 	return 0;
 }
 
@@ -1642,9 +1651,7 @@ int split_free_page(struct page *page)
 }
 
 /*
- * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
- * we cheat by calling it from here, in the order > 0 path. Saves a branch
- * or two.
+ * Allocate a page from the given zone. Use pcplists for order-0 allocations.
  */
 static inline
 struct page *buffered_rmqueue(struct zone *preferred_zone,
@@ -1655,7 +1662,6 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 	struct page *page;
 	bool cold = ((gfp_flags & __GFP_COLD) != 0);
 
-again:
 	if (likely(order == 0)) {
 		struct per_cpu_pages *pcp;
 		struct list_head *list;
@@ -1711,8 +1717,6 @@ again:
 	local_irq_restore(flags);
 
 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
-	if (prep_new_page(page, order, gfp_flags))
-		goto again;
 	return page;
 
 failed:
@@ -2177,25 +2181,16 @@ zonelist_scan:
 try_this_zone:
 		page = buffered_rmqueue(preferred_zone, zone, order,
 						gfp_mask, migratetype);
-		if (page)
-			break;
+		if (page) {
+			if (prep_new_page(page, order, gfp_mask, alloc_flags))
+				goto try_this_zone;
+			return page;
+		}
 this_zone_full:
 		if (IS_ENABLED(CONFIG_NUMA) && zlc_active)
 			zlc_mark_zone_full(zonelist, z);
 	}
 
-	if (page) {
-		/*
-		 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
-		 * necessary to allocate the page. The expectation is
-		 * that the caller is taking steps that will free more
-		 * memory. The caller should avoid the page being used
-		 * for !PFMEMALLOC purposes.
-		 */
-		page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
-		return page;
-	}
-
 	/*
 	 * The first pass makes sure allocations are spread fairly within the
 	 * local node. However, the local node might have free pages left
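
For reference, a condensed sketch of prep_new_page() as it reads after this
patch, reconstructed from the hunks above; the elided body is a placeholder,
not a verbatim copy of mm/page_alloc.c:

static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
								int alloc_flags)
{
	/* ... per-page sanity checks and page setup elided ... */

	set_page_owner(page, order, gfp_flags);

	/*
	 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to
	 * allocate the page. The expectation is that the caller is taking
	 * steps that will free more memory. The caller should avoid the page
	 * being used for !PFMEMALLOC purposes.
	 */
	page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);

	return 0;
}

The net effect: buffered_rmqueue() no longer loops on its internal "again:"
label when a freshly allocated page fails its checks. Instead,
get_page_from_freelist() retries the same zone via "goto try_this_zone" when
prep_new_page() returns nonzero, and because prep_new_page() now receives
alloc_flags, the pfmemalloc marking happens once, where the page is prepared.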