@@ -234,22 +234,39 @@ bool pgdat_reclaimable(struct pglist_data *pgdat)
 		pgdat_reclaimable_pages(pgdat) * 6;
 }
 
-unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru)
+/**
+ * lruvec_lru_size - Returns the number of pages on the given LRU list.
+ * @lruvec: lru vector
+ * @lru: lru to use
+ * @zone_idx: zones to consider (use MAX_NR_ZONES for the whole LRU list)
+ */
+unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
 {
+	unsigned long lru_size;
+	int zid;
+
 	if (!mem_cgroup_disabled())
-		return mem_cgroup_get_lru_size(lruvec, lru);
+		lru_size = mem_cgroup_get_lru_size(lruvec, lru);
+	else
+		lru_size = node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
 
-	return node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
-}
+	for (zid = zone_idx + 1; zid < MAX_NR_ZONES; zid++) {
+		struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
+		unsigned long size;
 
-unsigned long lruvec_zone_lru_size(struct lruvec *lruvec, enum lru_list lru,
-				   int zone_idx)
-{
-	if (!mem_cgroup_disabled())
-		return mem_cgroup_get_zone_lru_size(lruvec, lru, zone_idx);
+		if (!managed_zone(zone))
+			continue;
+
+		if (!mem_cgroup_disabled())
+			size = mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
+		else
+			size = zone_page_state(&lruvec_pgdat(lruvec)->node_zones[zid],
+					       NR_ZONE_LRU_BASE + lru);
+		lru_size -= min(size, lru_size);
+	}
+
+	return lru_size;
 
-	return zone_page_state(&lruvec_pgdat(lruvec)->node_zones[zone_idx],
-			       NR_ZONE_LRU_BASE + lru);
 }
 
 /*
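The helper above replaces both old functions. As an illustrative sketch (not part of the patch; lruvec and sc stand for a caller's existing struct lruvec * and struct scan_control *): pass MAX_NR_ZONES for the whole LRU list, or a zone index such as sc->reclaim_idx to count only pages in zones eligible for the current reclaim. With MAX_NR_ZONES the subtraction loop starts at zone_idx + 1 and therefore never runs, so those callers keep the old whole-list behaviour.

	/* whole LRU list, same value the old one-argument lruvec_lru_size() returned */
	unsigned long total = lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, MAX_NR_ZONES);

	/* only pages in zones eligible for this reclaim (zone index <= sc->reclaim_idx) */
	unsigned long eligible = lruvec_lru_size(lruvec, LRU_INACTIVE_FILE,
						 sc->reclaim_idx);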
@@ -2049,11 +2066,10 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
 						struct scan_control *sc, bool trace)
 {
 	unsigned long inactive_ratio;
-	unsigned long total_inactive, inactive;
-	unsigned long total_active, active;
+	unsigned long inactive, active;
+	enum lru_list inactive_lru = file * LRU_FILE;
+	enum lru_list active_lru = file * LRU_FILE + LRU_ACTIVE;
 	unsigned long gb;
-	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
-	int zid;
 
 	/*
 	 * If we don't have swap space, anonymous page deactivation
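The inactive_lru/active_lru shorthand relies on the enum lru_list layout in include/linux/mmzone.h, where LRU_ACTIVE is 1 and LRU_FILE is 2, so file * LRU_FILE selects the inactive list of the requested type and adding LRU_ACTIVE selects the matching active list. A worked sketch, assuming that layout:

	/*
	 * file == 0 (anon): inactive_lru == 0 (LRU_INACTIVE_ANON)
	 *                   active_lru   == 1 (LRU_ACTIVE_ANON)
	 * file == 1 (file): inactive_lru == 2 (LRU_INACTIVE_FILE)
	 *                   active_lru   == 3 (LRU_ACTIVE_FILE)
	 */
	enum lru_list inactive_lru = file * LRU_FILE;
	enum lru_list active_lru = file * LRU_FILE + LRU_ACTIVE;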
@@ -2062,27 +2078,8 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
 	if (!file && !total_swap_pages)
 		return false;
 
-	total_inactive = inactive = lruvec_lru_size(lruvec, file * LRU_FILE);
-	total_active = active = lruvec_lru_size(lruvec, file * LRU_FILE + LRU_ACTIVE);
-
-	/*
-	 * For zone-constrained allocations, it is necessary to check if
-	 * deactivations are required for lowmem to be reclaimed. This
-	 * calculates the inactive/active pages available in eligible zones.
-	 */
-	for (zid = sc->reclaim_idx + 1; zid < MAX_NR_ZONES; zid++) {
-		struct zone *zone = &pgdat->node_zones[zid];
-		unsigned long inactive_zone, active_zone;
-
-		if (!managed_zone(zone))
-			continue;
-
-		inactive_zone = lruvec_zone_lru_size(lruvec, file * LRU_FILE, zid);
-		active_zone = lruvec_zone_lru_size(lruvec, (file * LRU_FILE) + LRU_ACTIVE, zid);
-
-		inactive -= min(inactive, inactive_zone);
-		active -= min(active, active_zone);
-	}
+	inactive = lruvec_lru_size(lruvec, inactive_lru, sc->reclaim_idx);
+	active = lruvec_lru_size(lruvec, active_lru, sc->reclaim_idx);
 
 	gb = (inactive + active) >> (30 - PAGE_SHIFT);
 	if (gb)
@@ -2091,10 +2088,12 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
 		inactive_ratio = 1;
 
 	if (trace)
-		trace_mm_vmscan_inactive_list_is_low(pgdat->node_id,
+		trace_mm_vmscan_inactive_list_is_low(lruvec_pgdat(lruvec)->node_id,
 				sc->reclaim_idx,
-				total_inactive, inactive,
-				total_active, active, inactive_ratio, file);
+				lruvec_lru_size(lruvec, inactive_lru, MAX_NR_ZONES), inactive,
+				lruvec_lru_size(lruvec, active_lru, MAX_NR_ZONES), active,
+				inactive_ratio, file);
+
 	return inactive * inactive_ratio < active;
 }
 
@@ -2234,7 +2233,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
 	 * system is under heavy pressure.
 	 */
 	if (!inactive_list_is_low(lruvec, true, sc, false) &&
-	    lruvec_lru_size(lruvec, LRU_INACTIVE_FILE) >> sc->priority) {
+	    lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, MAX_NR_ZONES) >> sc->priority) {
 		scan_balance = SCAN_FILE;
 		goto out;
 	}
@@ -2260,10 +2259,10 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
 	 * anon in [0], file in [1]
 	 */
 
-	anon  = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON) +
-		lruvec_lru_size(lruvec, LRU_INACTIVE_ANON);
-	file  = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE) +
-		lruvec_lru_size(lruvec, LRU_INACTIVE_FILE);
+	anon  = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES) +
+		lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES);
+	file  = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES) +
+		lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, MAX_NR_ZONES);
 
 	spin_lock_irq(&pgdat->lru_lock);
 	if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
@@ -2301,7 +2300,7 @@ out:
 			unsigned long size;
 			unsigned long scan;
 
-			size = lruvec_lru_size(lruvec, lru);
+			size = lruvec_lru_size(lruvec, lru, MAX_NR_ZONES);
 			scan = size >> sc->priority;
 
 			if (!scan && pass && force_scan)
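Every converted call site in get_scan_count() passes MAX_NR_ZONES, so the per-LRU scan targets are still derived from the whole list. A worked example with illustrative numbers only (assuming 4KB pages and the default DEF_PRIORITY of 12):

	/* a 4GB file LRU is 1048576 pages; at priority 12 that gives */
	size = lruvec_lru_size(lruvec, lru, MAX_NR_ZONES);	/* 1048576 */
	scan = size >> sc->priority;				/* 1048576 >> 12 == 256 */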