@@ -920,7 +920,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			/* Case 1 above */
 			if (current_is_kswapd() &&
 			    PageReclaim(page) &&
-			    zone_is_reclaim_writeback(zone)) {
+			    test_bit(ZONE_WRITEBACK, &zone->flags)) {
 				nr_immediate++;
 				goto keep_locked;
 
@@ -1002,7 +1002,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 */
 		if (page_is_file_cache(page) &&
 				(!current_is_kswapd() ||
-				 !zone_is_reclaim_dirty(zone))) {
+				 !test_bit(ZONE_DIRTY, &zone->flags))) {
 			/*
 			 * Immediately reclaim when written back.
 			 * Similar in principal to deactivate_page()
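The zone_is_reclaim_writeback() and zone_is_reclaim_dirty() helpers removed in the two hunks above were thin test_bit() wrappers in include/linux/mmzone.h, which is why the conversion is mechanical. A sketch of what they presumably looked like before this patch (illustrative, not the verbatim removed code):

	static inline int zone_is_reclaim_writeback(struct zone *zone)
	{
		return test_bit(ZONE_WRITEBACK, &zone->flags);
	}

	static inline int zone_is_reclaim_dirty(struct zone *zone)
	{
		return test_bit(ZONE_TAIL_LRU_DIRTY, &zone->flags);
	}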
@@ -1563,7 +1563,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	 * are encountered in the nr_immediate check below.
 	 */
 	if (nr_writeback && nr_writeback == nr_taken)
-		zone_set_flag(zone, ZONE_WRITEBACK);
+		set_bit(ZONE_WRITEBACK, &zone->flags);
 
 	/*
 	 * memcg will stall in page writeback so only consider forcibly
@@ -1575,16 +1575,16 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	 * backed by a congested BDI and wait_iff_congested will stall.
 	 */
 	if (nr_dirty && nr_dirty == nr_congested)
-		zone_set_flag(zone, ZONE_CONGESTED);
+		set_bit(ZONE_CONGESTED, &zone->flags);
 
 	/*
 	 * If dirty pages are scanned that are not queued for IO, it
 	 * implies that flushers are not keeping up. In this case, flag
-	 * the zone ZONE_TAIL_LRU_DIRTY and kswapd will start writing
-	 * pages from reclaim context.
+	 * the zone ZONE_DIRTY and kswapd will start writing pages from
+	 * reclaim context.
 	 */
 	if (nr_unqueued_dirty == nr_taken)
-		zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
+		set_bit(ZONE_DIRTY, &zone->flags);
 
 	/*
 	 * If kswapd scans pages marked marked for immediate
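The hunk above also renames ZONE_TAIL_LRU_DIRTY to ZONE_DIRTY. With the wrappers gone, the flag constants are simply bit numbers into the unsigned long zone->flags word; a sketch of the post-patch definitions (only the flags touched by this patch are shown, and their exact order and neighbors are assumptions):

	/* sketch: bit indices into zone->flags, set and cleared atomically */
	enum zone_flags {
		ZONE_RECLAIM_LOCKED,	/* prevents concurrent reclaim */
		ZONE_CONGESTED,		/* many dirty pages on a congested BDI */
		ZONE_DIRTY,		/* many dirty file pages at the LRU tail */
		ZONE_WRITEBACK,		/* many pages under writeback at the LRU tail */
	};

Because set_bit() and clear_bit() are atomic read-modify-write operations, concurrent reclaimers can flip these flags without holding the zone lock, preserving the behavior of the old zone_set_flag()/zone_clear_flag() helpers.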
@@ -2984,7 +2984,7 @@ static bool kswapd_shrink_zone(struct zone *zone,
 	/* Account for the number of pages attempted to reclaim */
 	*nr_attempted += sc->nr_to_reclaim;
 
-	zone_clear_flag(zone, ZONE_WRITEBACK);
+	clear_bit(ZONE_WRITEBACK, &zone->flags);
 
 	/*
 	 * If a zone reaches its high watermark, consider it to be no longer
@@ -2994,8 +2994,8 @@ static bool kswapd_shrink_zone(struct zone *zone,
 	 */
 	if (zone_reclaimable(zone) &&
 	    zone_balanced(zone, testorder, 0, classzone_idx)) {
-		zone_clear_flag(zone, ZONE_CONGESTED);
-		zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
+		clear_bit(ZONE_CONGESTED, &zone->flags);
+		clear_bit(ZONE_DIRTY, &zone->flags);
 	}
 
 	return sc->nr_scanned >= sc->nr_to_reclaim;
@@ -3086,8 +3086,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 				 * If balanced, clear the dirty and congested
 				 * flags
 				 */
-				zone_clear_flag(zone, ZONE_CONGESTED);
-				zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
+				clear_bit(ZONE_CONGESTED, &zone->flags);
+				clear_bit(ZONE_DIRTY, &zone->flags);
 			}
 		}
 
@@ -3714,11 +3714,11 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	if (node_state(node_id, N_CPU) && node_id != numa_node_id())
 		return ZONE_RECLAIM_NOSCAN;
 
-	if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
+	if (test_and_set_bit(ZONE_RECLAIM_LOCKED, &zone->flags))
 		return ZONE_RECLAIM_NOSCAN;
 
 	ret = __zone_reclaim(zone, gfp_mask, order);
-	zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
+	clear_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
 
 	if (!ret)
 		count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
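test_and_set_bit() returns the previous value of the bit, so the ZONE_RECLAIM_LOCKED hunk preserves the try-lock semantics of the old zone_test_and_set_flag(): the first caller sees 0 and proceeds, any concurrent caller sees 1 and backs off, and clear_bit() releases the lock. The same pattern in isolation (a sketch of the usage above):

	/* try-lock: at most one task runs __zone_reclaim() per zone */
	if (test_and_set_bit(ZONE_RECLAIM_LOCKED, &zone->flags))
		return ZONE_RECLAIM_NOSCAN;	/* reclaim already running */

	ret = __zone_reclaim(zone, gfp_mask, order);	/* exclusive work */

	clear_bit(ZONE_RECLAIM_LOCKED, &zone->flags);	/* unlock */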