@@ -966,7 +966,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		int may_enter_fs;
 		enum page_references references = PAGEREF_RECLAIM_CLEAN;
 		bool dirty, writeback;
-		bool lazyfree = false;
 		int ret = SWAP_SUCCESS;

 		cond_resched();
@@ -1120,7 +1119,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 				goto keep_locked;
 			if (!add_to_swap(page, page_list))
 				goto activate_locked;
-			lazyfree = true;
 			may_enter_fs = 1;

 			/* Adding to swap updated mapping */
@@ -1138,9 +1136,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 * processes. Try to unmap it here.
 		 */
 		if (page_mapped(page) && mapping) {
-			switch (ret = try_to_unmap(page, lazyfree ?
-				(ttu_flags | TTU_BATCH_FLUSH | TTU_LZFREE) :
-				(ttu_flags | TTU_BATCH_FLUSH))) {
+			switch (ret = try_to_unmap(page,
+				ttu_flags | TTU_BATCH_FLUSH)) {
 			case SWAP_FAIL:
 				nr_unmap_fail++;
 				goto activate_locked;
@@ -1348,7 +1345,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 	}

 	ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
-			TTU_UNMAP|TTU_IGNORE_ACCESS, NULL, true);
+			TTU_IGNORE_ACCESS, NULL, true);
 	list_splice(&clean_pages, page_list);
 	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret);
 	return ret;
@@ -1740,7 +1737,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	if (nr_taken == 0)
 		return 0;

-	nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, TTU_UNMAP,
+	nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, 0,
 				&stat, false);

 	spin_lock_irq(&pgdat->lru_lock);
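
A quick way to see the net effect of the hunks above: shrink_page_list() no longer tracks a per-page lazyfree bit, so try_to_unmap() always receives ttu_flags | TTU_BATCH_FLUSH, and the two callers shown simply stop passing TTU_UNMAP. The standalone sketch below only illustrates that flag composition; the flag values and the unmap_flags() helper are made up for illustration and are not the kernel's definitions.

#include <stdio.h>

/* Toy stand-ins for the kernel's TTU bits; the values are illustrative,
 * not the real definitions. */
#define TTU_IGNORE_ACCESS	(1 << 0)
#define TTU_BATCH_FLUSH		(1 << 1)

/* Hypothetical helper: after this patch there is exactly one composition,
 * with no lazyfree conditional and no TTU_LZFREE/TTU_UNMAP bits. */
static int unmap_flags(int ttu_flags)
{
	return ttu_flags | TTU_BATCH_FLUSH;
}

int main(void)
{
	/* Per the hunks above: reclaim_clean_pages_from_list() passes only
	 * TTU_IGNORE_ACCESS, while shrink_inactive_list() passes 0. */
	printf("clean-list path:    %#x\n", unmap_flags(TTU_IGNORE_ACCESS));
	printf("inactive-list path: %#x\n", unmap_flags(0));
	return 0;
}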