@@ -2291,13 +2291,16 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
  * the caller that it should consider retrying the allocation instead of
  * further reclaim.
  */
-static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
+static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc,
+			 struct shrink_control *shrink)
 {
 	struct zoneref *z;
 	struct zone *zone;
 	unsigned long nr_soft_reclaimed;
 	unsigned long nr_soft_scanned;
+	unsigned long lru_pages = 0;
 	bool aborted_reclaim = false;
+	struct reclaim_state *reclaim_state = current->reclaim_state;
 
 	/*
 	 * If the number of buffer_heads in the machine exceeds the maximum
@@ -2307,6 +2310,8 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 	if (buffer_heads_over_limit)
 		sc->gfp_mask |= __GFP_HIGHMEM;
 
+	nodes_clear(shrink->nodes_to_scan);
+
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 					gfp_zone(sc->gfp_mask), sc->nodemask) {
 		if (!populated_zone(zone))
@@ -2318,6 +2323,10 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 		if (global_reclaim(sc)) {
 			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 				continue;
+
+			lru_pages += zone_reclaimable_pages(zone);
+			node_set(zone_to_nid(zone), shrink->nodes_to_scan);
+
 			if (sc->priority != DEF_PRIORITY &&
 			    !zone_reclaimable(zone))
 				continue;	/* Let kswapd poll it */
@@ -2354,6 +2363,20 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 		shrink_zone(zone, sc);
 	}
 
+	/*
+	 * Don't shrink slabs when reclaiming memory from over limit cgroups
+	 * but do shrink slab at least once when aborting reclaim for
+	 * compaction to avoid unevenly scanning file/anon LRU pages over slab
+	 * pages.
+	 */
+	if (global_reclaim(sc)) {
+		shrink_slab(shrink, sc->nr_scanned, lru_pages);
+		if (reclaim_state) {
+			sc->nr_reclaimed += reclaim_state->reclaimed_slab;
+			reclaim_state->reclaimed_slab = 0;
+		}
+	}
+
 	return aborted_reclaim;
 }
 
@@ -2398,9 +2421,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 					  struct shrink_control *shrink)
 {
 	unsigned long total_scanned = 0;
-	struct reclaim_state *reclaim_state = current->reclaim_state;
-	struct zoneref *z;
-	struct zone *zone;
 	unsigned long writeback_threshold;
 	bool aborted_reclaim;
 
@@ -2413,34 +2433,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
 				sc->priority);
 		sc->nr_scanned = 0;
-		aborted_reclaim = shrink_zones(zonelist, sc);
-
-		/*
-		 * Don't shrink slabs when reclaiming memory from over limit
-		 * cgroups but do shrink slab at least once when aborting
-		 * reclaim for compaction to avoid unevenly scanning file/anon
-		 * LRU pages over slab pages.
-		 */
-		if (global_reclaim(sc)) {
-			unsigned long lru_pages = 0;
-
-			nodes_clear(shrink->nodes_to_scan);
-			for_each_zone_zonelist_nodemask(zone, z, zonelist,
-					gfp_zone(sc->gfp_mask), sc->nodemask) {
-				if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
-					continue;
-
-				lru_pages += zone_reclaimable_pages(zone);
-				node_set(zone_to_nid(zone),
-					 shrink->nodes_to_scan);
-			}
+		aborted_reclaim = shrink_zones(zonelist, sc, shrink);
 
-			shrink_slab(shrink, sc->nr_scanned, lru_pages);
-			if (reclaim_state) {
-				sc->nr_reclaimed += reclaim_state->reclaimed_slab;
-				reclaim_state->reclaimed_slab = 0;
-			}
-		}
 		total_scanned += sc->nr_scanned;
 		if (sc->nr_reclaimed >= sc->nr_to_reclaim)
 			goto out;
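
A condensed view of what these hunks add up to: shrink_zones() already visits every eligible zone once per reclaim priority, so after this change it gathers the inputs shrink_slab() needs (the reclaimable-LRU page count and the set of nodes touched) during that same walk, and do_try_to_free_pages() drops its duplicate zonelist iteration. The sketch below reassembles the post-patch shrink_zones() flow from the hunks above; all identifiers are the real vmscan ones, but the zone filtering, soft reclaim, and compaction logic are elided, so treat it as an illustration rather than the verbatim kernel code.

static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc,
			 struct shrink_control *shrink)
{
	struct zoneref *z;
	struct zone *zone;
	unsigned long lru_pages = 0;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	bool aborted_reclaim = false;

	nodes_clear(shrink->nodes_to_scan);
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
					gfp_zone(sc->gfp_mask), sc->nodemask) {
		/* ... zone filtering elided (populated_zone, cpuset, etc.) ... */
		if (global_reclaim(sc)) {
			/* Collect the shrink_slab() inputs on the same walk. */
			lru_pages += zone_reclaimable_pages(zone);
			node_set(zone_to_nid(zone), shrink->nodes_to_scan);
		}
		shrink_zone(zone, sc);	/* LRU reclaim, unchanged */
	}

	/* One slab-shrinking pass per call, for global reclaim only. */
	if (global_reclaim(sc)) {
		shrink_slab(shrink, sc->nr_scanned, lru_pages);
		if (reclaim_state) {
			sc->nr_reclaimed += reclaim_state->reclaimed_slab;
			reclaim_state->reclaimed_slab = 0;
		}
	}
	return aborted_reclaim;
}

On the caller side, do_try_to_free_pages() is left with a plain priority loop: it calls shrink_zones(zonelist, sc, shrink), then checks sc->nr_reclaimed against sc->nr_to_reclaim, with no second pass over the zonelist.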