|
@@ -45,6 +45,7 @@ int page_cluster;
|
|
|
static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
|
|
|
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
|
|
|
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
|
|
|
+static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);	/* pages queued by deactivate_page() for a move to the inactive list */
|
|
|
|
|
|
/*
|
|
|
* This path almost never happens for VM activity - pages are normally
|
|
@@ -554,6 +555,24 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
|
|
|
update_page_reclaim_stat(lruvec, file, 0);
|
|
|
}
|
|
|
|
|
|
+
|
|
|
+static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
|
|
|
+ void *arg)
|
|
|
+{
|
|
|
+ if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {	/* only act on LRU-resident, active, evictable pages */
|
|
|
+ int file = page_is_file_cache(page);	/* anon vs file-backed, for reclaim stats */
|
|
|
+ int lru = page_lru_base_type(page);	/* base (inactive) LRU list for this page type */
|
|
|
+
|
|
|
+ del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);	/* pull the page off its active list */
|
|
|
+ ClearPageActive(page);	/* mark it no longer active ... */
|
|
|
+ ClearPageReferenced(page);	/* ... and drop the referenced bit so reclaim sees it as cold */
|
|
|
+ add_page_to_lru_list(page, lruvec, lru);	/* requeue on the matching inactive list */
|
|
|
+
|
|
|
+ __count_vm_event(PGDEACTIVATE);	/* account the deactivation */
|
|
|
+ update_page_reclaim_stat(lruvec, file, 0);
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
/*
|
|
|
* Drain pages out of the cpu's pagevecs.
|
|
|
* Either "cpu" is the current CPU, and preemption has already been
|
|
@@ -580,6 +599,10 @@ void lru_add_drain_cpu(int cpu)
|
|
|
if (pagevec_count(pvec))
|
|
|
pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
|
|
|
|
|
|
+ pvec = &per_cpu(lru_deactivate_pvecs, cpu);
|
|
|
+ if (pagevec_count(pvec))
|
|
|
+ pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
|
|
|
+
|
|
|
activate_page_drain(cpu);
|
|
|
}
|
|
|
|
|
@@ -609,6 +632,26 @@ void deactivate_file_page(struct page *page)
|
|
|
}
|
|
|
}
|
|
|
|
|
|
+/**
|
|
|
+ * deactivate_page - deactivate a page
|
|
|
+ * @page: page to deactivate
|
|
|
+ *
|
|
|
+ * deactivate_page() moves @page to the inactive list if @page was on the active
|
|
|
+ * list and was not an unevictable page. This is done to accelerate the reclaim
|
|
|
+ * of @page.
|
|
|
+ */
|
|
|
+void deactivate_page(struct page *page)
|
|
|
+{
|
|
|
+ if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {	/* unlocked precheck; lru_deactivate_fn repeats it before touching the lists */
|
|
|
+ struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);	/* per-CPU batch; get_cpu_var() disables preemption */
|
|
|
+
|
|
|
+ page_cache_get(page);	/* hold a reference while the page sits in the pagevec */
|
|
|
+ if (!pagevec_add(pvec, page))	/* pagevec full: drain the whole batch now */
|
|
|
+ pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
|
|
|
+ put_cpu_var(lru_deactivate_pvecs);	/* re-enable preemption */
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
void lru_add_drain(void)
|
|
|
{
|
|
|
lru_add_drain_cpu(get_cpu());
|
|
@@ -638,6 +681,7 @@ void lru_add_drain_all(void)
|
|
|
if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
|
|
|
pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
|
|
|
pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
|
|
|
+ pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
|
|
|
need_activate_page_drain(cpu)) {
|
|
|
INIT_WORK(work, lru_add_drain_per_cpu);
|
|
|
schedule_work_on(cpu, work);
|