@@ -1238,6 +1238,15 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 	}
 	local_irq_restore(flags);
 }
+static bool gfp_thisnode_allocation(gfp_t gfp_mask)
+{
+	return (gfp_mask & GFP_THISNODE) == GFP_THISNODE;
+}
+#else
+static bool gfp_thisnode_allocation(gfp_t gfp_mask)
+{
+	return false;
+}
 #endif
 
 /*
@@ -1574,7 +1583,13 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 					  get_pageblock_migratetype(page));
 	}
 
-	__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
+	/*
+	 * NOTE: GFP_THISNODE allocations do not partake in the kswapd
+	 * aging protocol, so they can't be fair.
+	 */
+	if (!gfp_thisnode_allocation(gfp_flags))
+		__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
+
 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
 	zone_statistics(preferred_zone, zone, gfp_flags);
 	local_irq_restore(flags);
@@ -1946,8 +1961,12 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
 		 * ultimately fall back to remote zones that do not
 		 * partake in the fairness round-robin cycle of this
 		 * zonelist.
+		 *
+		 * NOTE: GFP_THISNODE allocations do not partake in
+		 * the kswapd aging protocol, so they can't be fair.
 		 */
-		if (alloc_flags & ALLOC_WMARK_LOW) {
+		if ((alloc_flags & ALLOC_WMARK_LOW) &&
+		    !gfp_thisnode_allocation(gfp_mask)) {
 			if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
 				continue;
 			if (!zone_local(preferred_zone, zone))
@@ -2503,8 +2522,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 * allowed per node queues are empty and that nodes are
 	 * over allocated.
 	 */
-	if (IS_ENABLED(CONFIG_NUMA) &&
-	    (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
+	if (gfp_thisnode_allocation(gfp_mask))
 		goto nopage;
 
 restart:
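
The helper added above is subtle in two ways: GFP_THISNODE is a combination of flags, so the comparison must match the full mask rather than test __GFP_THISNODE alone, and on !CONFIG_NUMA builds GFP_THISNODE is defined to 0 (per gfp.h of this kernel era), which would make the mask test trivially true for every allocation; that is why the patch supplies a separate stub that simply returns false. Below is a minimal standalone sketch of the mask logic; the bit values are placeholders standing in for the real ___GFP_* constants, and the test program is illustrative only, not part of the patch.

/*
 * Illustrative sketch only -- not part of the patch above.  The bit
 * values are placeholders standing in for the real ___GFP_* constants;
 * only the mask logic mirrors gfp_thisnode_allocation().
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_NOWARN	((gfp_t)0x01u)	/* placeholder bit */
#define __GFP_NORETRY	((gfp_t)0x02u)	/* placeholder bit */
#define __GFP_THISNODE	((gfp_t)0x04u)	/* placeholder bit */

/* With CONFIG_NUMA, GFP_THISNODE combines all three flags. */
#define GFP_THISNODE	(__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)

static bool gfp_thisnode_allocation(gfp_t gfp_mask)
{
	/* True only when every bit of the combined mask is set. */
	return (gfp_mask & GFP_THISNODE) == GFP_THISNODE;
}

int main(void)
{
	/* Opportunistic node-local attempt (e.g. slab): exempt from fairness. */
	printf("%d\n", gfp_thisnode_allocation(GFP_THISNODE));   /* prints 1 */

	/* Only __GFP_THISNODE set: still subject to the fairness batch. */
	printf("%d\n", gfp_thisnode_allocation(__GFP_THISNODE)); /* prints 0 */

	return 0;
}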