@@ -3565,12 +3565,12 @@ module_init(kswapd_init)
 
 #ifdef CONFIG_NUMA
 /*
- * Zone reclaim mode
+ * Node reclaim mode
  *
- * If non-zero call zone_reclaim when the number of free pages falls below
+ * If non-zero call node_reclaim when the number of free pages falls below
  * the watermarks.
  */
-int zone_reclaim_mode __read_mostly;
+int node_reclaim_mode __read_mostly;
 
 #define RECLAIM_OFF 0
 #define RECLAIM_ZONE (1<<0)	/* Run shrink_inactive_list on the zone */
@@ -3578,14 +3578,14 @@ int zone_reclaim_mode __read_mostly;
 #define RECLAIM_UNMAP (1<<2)	/* Unmap pages during reclaim */
 
 /*
- * Priority for ZONE_RECLAIM. This determines the fraction of pages
+ * Priority for NODE_RECLAIM. This determines the fraction of pages
  * of a node considered for each zone_reclaim. 4 scans 1/16th of
  * a zone.
  */
-#define ZONE_RECLAIM_PRIORITY 4
+#define NODE_RECLAIM_PRIORITY 4
 
 /*
- * Percentage of pages in a zone that must be unmapped for zone_reclaim to
+ * Percentage of pages in a zone that must be unmapped for node_reclaim to
  * occur.
  */
 int sysctl_min_unmapped_ratio = 1;
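The priority here feeds sc.priority in __node_reclaim() below: each reclaim pass scans roughly lru_size >> priority pages, so a NODE_RECLAIM_PRIORITY of 4 starts at about 1/16th of the node's LRU, and the retry loop raises the pressure by decrementing the priority toward 0. A minimal, runnable sketch of that arithmetic (plain userspace C; the names are illustrative, not the kernel's):

	#include <stdio.h>

	int main(void)
	{
		unsigned long lru_size = 1UL << 20;	/* pretend: 1M LRU pages on the node */
		int priority;

		/* Each pass looks at ~lru_size >> priority pages; a lower
		 * priority means a larger slice, up to the whole LRU at 0.
		 */
		for (priority = 4; priority >= 0; priority--)
			printf("priority %d scans ~%lu pages (1/%lu of the LRU)\n",
			       priority, lru_size >> priority, 1UL << priority);
		return 0;
	}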
@@ -3611,7 +3611,7 @@ static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat)
 }
 
 /* Work out how many page cache pages we can reclaim in this reclaim_mode */
-static unsigned long zone_pagecache_reclaimable(struct zone *zone)
+static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
 {
 	unsigned long nr_pagecache_reclaimable;
 	unsigned long delta = 0;
@@ -3622,14 +3622,14 @@ static unsigned long zone_pagecache_reclaimable(struct zone *zone)
 	 * pages like swapcache and node_unmapped_file_pages() provides
 	 * a better estimate
 	 */
-	if (zone_reclaim_mode & RECLAIM_UNMAP)
-		nr_pagecache_reclaimable = node_page_state(zone->zone_pgdat, NR_FILE_PAGES);
+	if (node_reclaim_mode & RECLAIM_UNMAP)
+		nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES);
 	else
-		nr_pagecache_reclaimable = node_unmapped_file_pages(zone->zone_pgdat);
+		nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat);
 
 	/* If we can't clean pages, remove dirty pages from consideration */
-	if (!(zone_reclaim_mode & RECLAIM_WRITE))
-		delta += node_page_state(zone->zone_pgdat, NR_FILE_DIRTY);
+	if (!(node_reclaim_mode & RECLAIM_WRITE))
+		delta += node_page_state(pgdat, NR_FILE_DIRTY);
 
 	/* Watch for any possible underflows due to delta */
 	if (unlikely(delta > nr_pagecache_reclaimable))
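Put differently: the estimate starts from all file-backed pages when unmapping is allowed, otherwise only the unmapped ones; dirty pages are excluded when writeback is not permitted, and the subtraction is clamped so it cannot underflow. A small standalone model of the same calculation (illustrative only, not kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	static unsigned long pagecache_reclaimable(unsigned long file, unsigned long mapped,
						   unsigned long dirty, bool may_unmap,
						   bool may_write)
	{
		/* With unmapping allowed every file page is a candidate,
		 * otherwise only the pages nobody has mapped (clamped at 0).
		 */
		unsigned long reclaimable = may_unmap ? file :
					    (mapped < file ? file - mapped : 0);
		unsigned long delta = may_write ? 0 : dirty;

		/* Guard against underflow, as the kernel does. */
		if (delta > reclaimable)
			delta = reclaimable;
		return reclaimable - delta;
	}

	int main(void)
	{
		/* Example: 1000 file pages, 300 mapped, 200 dirty, conservative mode. */
		printf("~%lu reclaimable page-cache pages\n",
		       pagecache_reclaimable(1000, 300, 200, false, false));
		return 0;
	}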
@@ -3639,23 +3639,24 @@ static unsigned long zone_pagecache_reclaimable(struct zone *zone)
 }
 
 /*
- * Try to free up some pages from this zone through reclaim.
+ * Try to free up some pages from this node through reclaim.
  */
-static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
+static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
 {
 	/* Minimum pages needed in order to stay on node */
 	const unsigned long nr_pages = 1 << order;
 	struct task_struct *p = current;
 	struct reclaim_state reclaim_state;
+	int classzone_idx = gfp_zone(gfp_mask);
 	struct scan_control sc = {
 		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
 		.gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
 		.order = order,
-		.priority = ZONE_RECLAIM_PRIORITY,
-		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
-		.may_unmap = !!(zone_reclaim_mode & RECLAIM_UNMAP),
+		.priority = NODE_RECLAIM_PRIORITY,
+		.may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
+		.may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
 		.may_swap = 1,
-		.reclaim_idx = zone_idx(zone),
+		.reclaim_idx = classzone_idx,
 	};
 
 	cond_resched();
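The new classzone_idx is the other substantive change in this hunk: sc.reclaim_idx used to be the index of the one zone being reclaimed, but node-wide reclaim instead derives it from the allocation's GFP flags via gfp_zone(), i.e. the highest zone type the request may use, so reclaim only considers zones the caller could actually allocate from. For example (real gfp_zone() behaviour, shown purely as illustration):

	gfp_zone(GFP_KERNEL);		/* ZONE_NORMAL: kernel allocations stay below highmem */
	gfp_zone(GFP_HIGHUSER_MOVABLE);	/* ZONE_MOVABLE: user pages may sit anywhere */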
@@ -3669,13 +3670,13 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	reclaim_state.reclaimed_slab = 0;
 	p->reclaim_state = &reclaim_state;
 
-	if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
+	if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) {
 		/*
 		 * Free memory by calling shrink zone with increasing
 		 * priorities until we have enough memory freed.
 		 */
 		do {
-			shrink_node(zone->zone_pgdat, &sc, zone_idx(zone));
+			shrink_node(pgdat, &sc, classzone_idx);
 		} while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
 	}
 
@@ -3685,49 +3686,47 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	return sc.nr_reclaimed >= nr_pages;
 }
 
-int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
+int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
 {
-	int node_id;
 	int ret;
 
 	/*
-	 * Zone reclaim reclaims unmapped file backed pages and
+	 * Node reclaim reclaims unmapped file backed pages and
 	 * slab pages if we are over the defined limits.
 	 *
 	 * A small portion of unmapped file backed pages is needed for
 	 * file I/O otherwise pages read by file I/O will be immediately
-	 * thrown out if the zone is overallocated. So we do not reclaim
-	 * if less than a specified percentage of the zone is used by
+	 * thrown out if the node is overallocated. So we do not reclaim
+	 * if less than a specified percentage of the node is used by
	 * unmapped file backed pages.
	 */
-	if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
-	    zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
-		return ZONE_RECLAIM_FULL;
+	if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
+	    sum_zone_node_page_state(pgdat->node_id, NR_SLAB_RECLAIMABLE) <= pgdat->min_slab_pages)
+		return NODE_RECLAIM_FULL;
 
-	if (!pgdat_reclaimable(zone->zone_pgdat))
-		return ZONE_RECLAIM_FULL;
+	if (!pgdat_reclaimable(pgdat))
+		return NODE_RECLAIM_FULL;
 
 	/*
 	 * Do not scan if the allocation should not be delayed.
 	 */
 	if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
-		return ZONE_RECLAIM_NOSCAN;
+		return NODE_RECLAIM_NOSCAN;
 
 	/*
-	 * Only run zone reclaim on the local zone or on zones that do not
+	 * Only run node reclaim on the local node or on nodes that do not
	 * have associated processors. This will favor the local processor
	 * over remote processors and spread off node memory allocations
	 * as wide as possible.
	 */
-	node_id = zone_to_nid(zone);
-	if (node_state(node_id, N_CPU) && node_id != numa_node_id())
-		return ZONE_RECLAIM_NOSCAN;
+	if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
+		return NODE_RECLAIM_NOSCAN;
 
-	if (test_and_set_bit(ZONE_RECLAIM_LOCKED, &zone->flags))
-		return ZONE_RECLAIM_NOSCAN;
+	if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
+		return NODE_RECLAIM_NOSCAN;
 
-	ret = __zone_reclaim(zone, gfp_mask, order);
-	clear_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
+	ret = __node_reclaim(pgdat, gfp_mask, order);
+	clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
 
 	if (!ret)
 		count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
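For context on the return codes: NODE_RECLAIM_FULL means nothing on this node is worth reclaiming, NODE_RECLAIM_NOSCAN means reclaim was skipped entirely (non-blocking allocation, a remote node that has its own CPUs, or another task already holds PGDAT_RECLAIM_LOCKED), and otherwise the result of __node_reclaim() says whether enough pages were freed. The allocator's fast path consumes these codes to decide whether to re-check the zone watermark or move on to the next zone; a heavily simplified sketch of that caller follows (the real logic lives in get_page_from_freelist() and handles more cases; the labels and locals here are placeholders):

	if (!node_reclaim_mode)
		goto try_next_zone;

	switch (node_reclaim(zone->zone_pgdat, gfp_mask, order)) {
	case NODE_RECLAIM_NOSCAN:	/* did not even try */
	case NODE_RECLAIM_FULL:		/* tried, nothing (more) reclaimable */
		goto try_next_zone;
	default:
		/* made some progress: does the watermark pass now? */
		if (zone_watermark_ok(zone, order, mark, classzone_idx, alloc_flags))
			goto try_this_zone;
		goto try_next_zone;
	}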