@@ -656,6 +656,9 @@ static inline void set_page_guard(struct zone *zone, struct page *page,
 		return;
 
 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 
 	INIT_LIST_HEAD(&page->lru);
@@ -673,6 +676,9 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
 		return;
 
 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 
 	set_page_private(page, 0);
@@ -2609,11 +2615,12 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 				page = list_last_entry(list, struct page, lru);
 			else
 				page = list_first_entry(list, struct page, lru);
-		} while (page && check_new_pcp(page));
 
-		__dec_zone_state(zone, NR_ALLOC_BATCH);
-		list_del(&page->lru);
-		pcp->count--;
+			__dec_zone_state(zone, NR_ALLOC_BATCH);
+			list_del(&page->lru);
+			pcp->count--;
+
+		} while (check_new_pcp(page));
 	} else {
 		/*
 		 * We most definitely don't want callers attempting to
@@ -3023,6 +3030,7 @@ reset_fair:
 		apply_fair = false;
 		fair_skipped = false;
 		reset_alloc_batches(ac->preferred_zoneref->zone);
+		z = ac->preferred_zoneref;
 		goto zonelist_scan;
 	}
 
@@ -3596,6 +3604,17 @@ retry:
 	 */
 	alloc_flags = gfp_to_alloc_flags(gfp_mask);
 
+	/*
+	 * Reset the zonelist iterators if memory policies can be ignored.
+	 * These allocations are high priority and system rather than user
+	 * orientated.
+	 */
+	if ((alloc_flags & ALLOC_NO_WATERMARKS) || !(alloc_flags & ALLOC_CPUSET)) {
+		ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
+		ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
+					ac->high_zoneidx, ac->nodemask);
+	}
+
 	/* This is the last chance, in general, before the goto nopage. */
 	page = get_page_from_freelist(gfp_mask, order,
 				alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
@@ -3604,12 +3623,6 @@ retry:
|
|
|
|
|
|
/* Allocate without watermarks if the context allows */
|
|
|
if (alloc_flags & ALLOC_NO_WATERMARKS) {
|
|
|
- /*
|
|
|
- * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
|
|
|
- * the allocation is high priority and these type of
|
|
|
- * allocations are system rather than user orientated
|
|
|
- */
|
|
|
- ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
|
|
|
page = get_page_from_freelist(gfp_mask, order,
|
|
|
ALLOC_NO_WATERMARKS, ac);
|
|
|
if (page)
|
|
@@ -3808,7 +3821,11 @@ retry_cpuset:
|
|
|
/* Dirty zone balancing only done in the fast path */
|
|
|
ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
|
|
|
|
|
|
- /* The preferred zone is used for statistics later */
|
|
|
+ /*
|
|
|
+ * The preferred zone is used for statistics but crucially it is
|
|
|
+ * also used as the starting point for the zonelist iterator. It
|
|
|
+ * may get reset for allocations that ignore memory policies.
|
|
|
+ */
|
|
|
ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
|
|
|
ac.high_zoneidx, ac.nodemask);
|
|
|
if (!ac.preferred_zoneref) {
|