@@ -1214,7 +1214,7 @@ void do_page_add_anon_rmap(struct page *page,
 		 */
 		if (compound)
 			__inc_zone_page_state(page, NR_ANON_THPS);
-		__mod_zone_page_state(page_zone(page), NR_ANON_PAGES, nr);
+		__mod_node_page_state(page_pgdat(page), NR_ANON_PAGES, nr);
 	}
 	if (unlikely(PageKsm(page)))
 		return;
@@ -1258,7 +1258,7 @@ void page_add_new_anon_rmap(struct page *page,
 		/* increment count (starts at -1) */
 		atomic_set(&page->_mapcount, 0);
 	}
-	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES, nr);
+	__mod_node_page_state(page_pgdat(page), NR_ANON_PAGES, nr);
 	__page_set_anon_rmap(page, vma, address, 1);
 }
 
@@ -1293,7 +1293,7 @@ void page_add_file_rmap(struct page *page, bool compound)
 		if (!atomic_inc_and_test(&page->_mapcount))
 			goto out;
 	}
-	__mod_zone_page_state(page_zone(page), NR_FILE_MAPPED, nr);
+	__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr);
 	mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
 out:
 	unlock_page_memcg(page);
@@ -1329,11 +1329,11 @@ static void page_remove_file_rmap(struct page *page, bool compound)
 	}
 
 	/*
-	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
+	 * We use the irq-unsafe __{inc|mod}_zone_page_state because
 	 * these counters are not modified in interrupt context, and
 	 * pte lock(a spinlock) is held, which implies preemption disabled.
 	 */
-	__mod_zone_page_state(page_zone(page), NR_FILE_MAPPED, -nr);
+	__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr);
 	mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
 
 	if (unlikely(PageMlocked(page)))
@@ -1375,7 +1375,7 @@ static void page_remove_anon_compound_rmap(struct page *page)
 		clear_page_mlock(page);
 
 	if (nr) {
-		__mod_zone_page_state(page_zone(page), NR_ANON_PAGES, -nr);
+		__mod_node_page_state(page_pgdat(page), NR_ANON_PAGES, -nr);
 		deferred_split_huge_page(page);
 	}
 }
@@ -1404,7 +1404,7 @@ void page_remove_rmap(struct page *page, bool compound)
 	 * these counters are not modified in interrupt context, and
 	 * pte lock(a spinlock) is held, which implies preemption disabled.
 	 */
-	__dec_zone_page_state(page, NR_ANON_PAGES);
+	__dec_node_page_state(page, NR_ANON_PAGES);
 
 	if (unlikely(PageMlocked(page)))
 		clear_page_mlock(page);
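
Note: every hunk above makes the same move: counters that used to live in per-zone
statistics, updated through __mod_zone_page_state(page_zone(page), ...), now live in
per-node statistics, updated through __mod_node_page_state(page_pgdat(page), ...).
The userspace sketch below only illustrates that shape of the change; the structs and
helper names (mod_zone_stat, mod_node_stat, the three-zone array) are simplified
stand-ins, not the real kernel definitions, and the sketch is not part of the patch.

/*
 * Simplified illustration of per-zone vs. per-node counter accounting.
 * Hypothetical userspace code; not the kernel's struct zone / pg_data_t.
 */
#include <stdio.h>

enum stat_item { NR_FILE_MAPPED, NR_ANON_PAGES, NR_STAT_ITEMS };

struct zone { long vm_stat[NR_STAT_ITEMS]; };          /* old home of the counters */

struct pglist_data {
	struct zone zones[3];                           /* e.g. DMA, DMA32, NORMAL  */
	long vm_stat[NR_STAT_ITEMS];                    /* new per-node counters    */
};

/* Old style: the counter is charged to the zone the page sits in. */
static void mod_zone_stat(struct zone *z, enum stat_item item, long nr)
{
	z->vm_stat[item] += nr;
}

/* New style: the counter is charged to the whole node, regardless of zone. */
static void mod_node_stat(struct pglist_data *pgdat, enum stat_item item, long nr)
{
	pgdat->vm_stat[item] += nr;
}

int main(void)
{
	struct pglist_data node = { 0 };

	/* Before: each zone accumulated its own NR_FILE_MAPPED. */
	mod_zone_stat(&node.zones[2], NR_FILE_MAPPED, 1);

	/* After: one node-wide counter. */
	mod_node_stat(&node, NR_FILE_MAPPED, 1);

	printf("zone NR_FILE_MAPPED=%ld, node NR_FILE_MAPPED=%ld\n",
	       node.zones[2].vm_stat[NR_FILE_MAPPED],
	       node.vm_stat[NR_FILE_MAPPED]);
	return 0;
}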