@@ -912,6 +912,14 @@ static void page_check_dirty_writeback(struct page *page,
 		mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
 }
 
+struct reclaim_stat {
+	unsigned nr_dirty;
+	unsigned nr_unqueued_dirty;
+	unsigned nr_congested;
+	unsigned nr_writeback;
+	unsigned nr_immediate;
+};
+
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
@@ -919,22 +927,18 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 				      struct pglist_data *pgdat,
 				      struct scan_control *sc,
 				      enum ttu_flags ttu_flags,
-				      unsigned long *ret_nr_dirty,
-				      unsigned long *ret_nr_unqueued_dirty,
-				      unsigned long *ret_nr_congested,
-				      unsigned long *ret_nr_writeback,
-				      unsigned long *ret_nr_immediate,
+				      struct reclaim_stat *stat,
 				      bool force_reclaim)
 {
 	LIST_HEAD(ret_pages);
 	LIST_HEAD(free_pages);
 	int pgactivate = 0;
-	unsigned long nr_unqueued_dirty = 0;
-	unsigned long nr_dirty = 0;
-	unsigned long nr_congested = 0;
-	unsigned long nr_reclaimed = 0;
-	unsigned long nr_writeback = 0;
-	unsigned long nr_immediate = 0;
+	unsigned nr_unqueued_dirty = 0;
+	unsigned nr_dirty = 0;
+	unsigned nr_congested = 0;
+	unsigned nr_reclaimed = 0;
+	unsigned nr_writeback = 0;
+	unsigned nr_immediate = 0;
 
 	cond_resched();
 
@@ -1276,11 +1280,13 @@ keep:
 	list_splice(&ret_pages, page_list);
 	count_vm_events(PGACTIVATE, pgactivate);
 
-	*ret_nr_dirty += nr_dirty;
-	*ret_nr_congested += nr_congested;
-	*ret_nr_unqueued_dirty += nr_unqueued_dirty;
-	*ret_nr_writeback += nr_writeback;
-	*ret_nr_immediate += nr_immediate;
+	if (stat) {
+		stat->nr_dirty = nr_dirty;
+		stat->nr_congested = nr_congested;
+		stat->nr_unqueued_dirty = nr_unqueued_dirty;
+		stat->nr_writeback = nr_writeback;
+		stat->nr_immediate = nr_immediate;
+	}
 	return nr_reclaimed;
 }
 
@@ -1292,7 +1298,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 		.priority = DEF_PRIORITY,
 		.may_unmap = 1,
 	};
-	unsigned long ret, dummy1, dummy2, dummy3, dummy4, dummy5;
+	unsigned long ret;
 	struct page *page, *next;
 	LIST_HEAD(clean_pages);
 
@@ -1305,8 +1311,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 	}
 
 	ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
-			TTU_UNMAP|TTU_IGNORE_ACCESS,
-			&dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
+			TTU_UNMAP|TTU_IGNORE_ACCESS, NULL, true);
 	list_splice(&clean_pages, page_list);
 	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret);
 	return ret;
@@ -1705,11 +1710,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	unsigned long nr_scanned;
 	unsigned long nr_reclaimed = 0;
 	unsigned long nr_taken;
-	unsigned long nr_dirty = 0;
-	unsigned long nr_congested = 0;
-	unsigned long nr_unqueued_dirty = 0;
-	unsigned long nr_writeback = 0;
-	unsigned long nr_immediate = 0;
+	struct reclaim_stat stat = {};
 	isolate_mode_t isolate_mode = 0;
 	int file = is_file_lru(lru);
 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
@@ -1754,9 +1755,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 		return 0;
 
 	nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, TTU_UNMAP,
-				&nr_dirty, &nr_unqueued_dirty, &nr_congested,
-				&nr_writeback, &nr_immediate,
-				false);
+				&stat, false);
 
 	spin_lock_irq(&pgdat->lru_lock);
 
@@ -1790,7 +1789,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	 * of pages under pages flagged for immediate reclaim and stall if any
 	 * are encountered in the nr_immediate check below.
 	 */
-	if (nr_writeback && nr_writeback == nr_taken)
+	if (stat.nr_writeback && stat.nr_writeback == nr_taken)
 		set_bit(PGDAT_WRITEBACK, &pgdat->flags);
 
 	/*
@@ -1802,7 +1801,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	 * Tag a zone as congested if all the dirty pages scanned were
 	 * backed by a congested BDI and wait_iff_congested will stall.
 	 */
-	if (nr_dirty && nr_dirty == nr_congested)
+	if (stat.nr_dirty && stat.nr_dirty == stat.nr_congested)
 		set_bit(PGDAT_CONGESTED, &pgdat->flags);
 
 	/*
@@ -1811,7 +1810,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	 * the pgdat PGDAT_DIRTY and kswapd will start writing pages from
 	 * reclaim context.
 	 */
-	if (nr_unqueued_dirty == nr_taken)
+	if (stat.nr_unqueued_dirty == nr_taken)
 		set_bit(PGDAT_DIRTY, &pgdat->flags);
 
 	/*
@@ -1820,7 +1819,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	 * that pages are cycling through the LRU faster than
 	 * they are written so also forcibly stall.
 	 */
-	if (nr_immediate && current_may_throttle())
+	if (stat.nr_immediate && current_may_throttle())
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
 }
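The patch above is the usual "stats struct instead of a row of output pointers" refactoring: shrink_page_list() now fills a single optional struct reclaim_stat, and a caller such as reclaim_clean_pages_from_list() that does not need the per-page breakdown passes NULL instead of five dummy variables. Adding another counter later only touches the struct and the readers, not every caller's argument list. Below is a minimal, self-contained user-space sketch of the same pattern; it is not kernel code, and the names demo_stat and scan_items are invented for illustration.

	#include <stdio.h>

	struct demo_stat {			/* stands in for struct reclaim_stat */
		unsigned nr_dirty;
		unsigned nr_writeback;
	};

	/* stands in for shrink_page_list(): fills *stat only if the caller asked */
	static unsigned long scan_items(unsigned long nr_items, struct demo_stat *stat)
	{
		unsigned long reclaimed = nr_items / 2;	/* pretend half were reclaimed */

		if (stat) {
			stat->nr_dirty = nr_items / 4;
			stat->nr_writeback = nr_items / 8;
		}
		return reclaimed;
	}

	int main(void)
	{
		struct demo_stat stat = { 0 };	/* caller that wants the breakdown */
		unsigned long reclaimed = scan_items(64, &stat);

		printf("reclaimed=%lu dirty=%u writeback=%u\n",
		       reclaimed, stat.nr_dirty, stat.nr_writeback);

		/* caller that only needs the return value: pass NULL, no dummies */
		printf("reclaimed=%lu\n", scan_items(64, NULL));
		return 0;
	}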