@@ -46,7 +46,7 @@ int page_cluster;
 static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
 static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
 static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
-static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
+static DEFINE_PER_CPU(struct pagevec, lru_lazyfree_pvecs);
 #ifdef CONFIG_SMP
 static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
 #endif
@@ -571,20 +571,27 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
 	}
 }
 
-static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
+static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
 			    void *arg)
 {
-	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
-		int file = page_is_file_cache(page);
-		int lru = page_lru_base_type(page);
+	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
+	    !PageUnevictable(page)) {
+		bool active = PageActive(page);
 
-		del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
+		del_page_from_lru_list(page, lruvec,
+				       LRU_INACTIVE_ANON + active);
 		ClearPageActive(page);
 		ClearPageReferenced(page);
-		add_page_to_lru_list(page, lruvec, lru);
+		/*
+		 * Lazyfree pages are clean anonymous pages. They have the
+		 * SwapBacked flag cleared to distinguish them from normal
+		 * anonymous pages.
+		 */
+		ClearPageSwapBacked(page);
+		add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
 
-		__count_vm_event(PGDEACTIVATE);
-		update_page_reclaim_stat(lruvec, file, 0);
+		__count_vm_events(PGLAZYFREE, hpage_nr_pages(page));
+		update_page_reclaim_stat(lruvec, 1, 0);
 	}
 }
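[Editor's note, not part of the patch: the `LRU_INACTIVE_ANON + active` arithmetic in the hunk above leans on the layout of the kernel's enum lru_list, where each active list immediately follows its inactive counterpart. A standalone sketch of that invariant, with the enum values copied from include/linux/mmzone.h:

/* Standalone model of the enum lru_list ordering assumed by the hunk above. */
#include <assert.h>
#include <stdbool.h>

enum lru_list {
	LRU_INACTIVE_ANON,	/* 0 */
	LRU_ACTIVE_ANON,	/* 1: inactive base + 1 */
	LRU_INACTIVE_FILE,	/* 2 */
	LRU_ACTIVE_FILE,	/* 3 */
	LRU_UNEVICTABLE,	/* 4 */
};

int main(void)
{
	bool active = true;

	/* The list an anon page currently sits on is the inactive base + active. */
	assert(LRU_INACTIVE_ANON + active == LRU_ACTIVE_ANON);
	active = false;
	assert(LRU_INACTIVE_ANON + active == LRU_INACTIVE_ANON);
	return 0;
}
]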
@@ -614,9 +621,9 @@ void lru_add_drain_cpu(int cpu)
 	if (pagevec_count(pvec))
 		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
 
-	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
+	pvec = &per_cpu(lru_lazyfree_pvecs, cpu);
 	if (pagevec_count(pvec))
-		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+		pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
 
 	activate_page_drain(cpu);
 }
@@ -648,22 +655,22 @@ void deactivate_file_page(struct page *page)
 }
 
 /**
- * deactivate_page - deactivate a page
+ * mark_page_lazyfree - make an anon page lazyfree
  * @page: page to deactivate
  *
- * deactivate_page() moves @page to the inactive list if @page was on the active
- * list and was not an unevictable page. This is done to accelerate the reclaim
- * of @page.
+ * mark_page_lazyfree() moves @page to the inactive file list.
+ * This is done to accelerate the reclaim of @page.
  */
-void deactivate_page(struct page *page)
+void mark_page_lazyfree(struct page *page)
 {
-	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
-		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
+	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
+	    !PageUnevictable(page)) {
+		struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);
 
 		get_page(page);
 		if (!pagevec_add(pvec, page) || PageCompound(page))
-			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
-		put_cpu_var(lru_deactivate_pvecs);
+			pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
+		put_cpu_var(lru_lazyfree_pvecs);
 	}
 }
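[Editor's note, not part of the patch: the `!pagevec_add(pvec, page)` test above works because pagevec_add() returns the number of slots still free, so zero means the per-CPU batch is full and must be flushed under the LRU lock. A minimal user-space model of that batch-then-flush pattern; the capacity of 14 is an assumption matching struct pagevec of this era:

#include <stdio.h>

#define PAGEVEC_SIZE 14	/* assumed struct pagevec capacity circa Linux 4.11 */

struct pagevec {
	unsigned char nr;
	void *pages[PAGEVEC_SIZE];
};

/* Like the kernel's pagevec_add(): returns the space remaining after adding. */
static unsigned pagevec_add_model(struct pagevec *pvec, void *page)
{
	pvec->pages[pvec->nr++] = page;
	return PAGEVEC_SIZE - pvec->nr;
}

int main(void)
{
	struct pagevec pvec = { 0 };
	int dummy[20];

	for (int i = 0; i < 20; i++) {
		if (!pagevec_add_model(&pvec, &dummy[i])) {
			printf("batch full, flushing %u pages\n", pvec.nr);
			pvec.nr = 0;	/* stands in for pagevec_lru_move_fn() */
		}
	}
	printf("%u pages left in the partial batch\n", pvec.nr);
	return 0;
}
]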
@@ -703,7 +710,7 @@ void lru_add_drain_all(void)
 		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
 		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
 		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
-		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
+		    pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) ||
 		    need_activate_page_drain(cpu)) {
 			INIT_WORK(work, lru_add_drain_per_cpu);
 			queue_work_on(cpu, mm_percpu_wq, work);
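[Editor's note, not part of the patch: the lazyfree path above is what ultimately services the MADV_FREE hint from madvise(2), which this patch routes through mark_page_lazyfree(). A minimal user-space sketch of the trigger, assuming Linux 4.5+ headers that define MADV_FREE; buffer size and fill pattern are arbitrary:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 16 * 4096;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	memset(buf, 0xaa, len);	/* dirty the anonymous pages */

	/*
	 * Mark the range disposable: under memory pressure the kernel may
	 * now drop these pages without swap I/O, finding them on the
	 * inactive file LRU per the patch above.
	 */
	if (madvise(buf, len, MADV_FREE) != 0)
		perror("madvise(MADV_FREE)");

	buf[0] = 1;	/* a later write cancels lazyfree for the touched page */

	munmap(buf, len);
	return 0;
}
]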