@@ -1270,18 +1270,34 @@ void page_add_new_anon_rmap(struct page *page,
  *
  * The caller needs to hold the pte lock.
  */
-void page_add_file_rmap(struct page *page)
+void page_add_file_rmap(struct page *page, bool compound)
 {
+	int i, nr = 1;
+
+	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
 	lock_page_memcg(page);
-	if (atomic_inc_and_test(&page->_mapcount)) {
-		__inc_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
+	if (compound && PageTransHuge(page)) {
+		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
+			if (atomic_inc_and_test(&page[i]._mapcount))
+				nr++;
+		}
+		if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
+			goto out;
+	} else {
+		if (!atomic_inc_and_test(&page->_mapcount))
+			goto out;
 	}
+	__mod_zone_page_state(page_zone(page), NR_FILE_MAPPED, nr);
+	mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
+out:
 	unlock_page_memcg(page);
 }
 
-static void page_remove_file_rmap(struct page *page)
+static void page_remove_file_rmap(struct page *page, bool compound)
 {
+	int i, nr = 1;
+
+	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
 	lock_page_memcg(page);
 
 	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
@@ -1292,15 +1308,24 @@ static void page_remove_file_rmap(struct page *page)
 	}
 
 	/* page still mapped by someone else? */
-	if (!atomic_add_negative(-1, &page->_mapcount))
-		goto out;
+	if (compound && PageTransHuge(page)) {
+		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
+			if (atomic_add_negative(-1, &page[i]._mapcount))
+				nr++;
+		}
+		if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
+			goto out;
+	} else {
+		if (!atomic_add_negative(-1, &page->_mapcount))
+			goto out;
+	}
 
 	/*
 	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
 	 * these counters are not modified in interrupt context, and
 	 * pte lock(a spinlock) is held, which implies preemption disabled.
	 */
-	__dec_zone_page_state(page, NR_FILE_MAPPED);
+	__mod_zone_page_state(page_zone(page), NR_FILE_MAPPED, -nr);
 	mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
 
 	if (unlikely(PageMlocked(page)))
@@ -1356,11 +1381,8 @@ static void page_remove_anon_compound_rmap(struct page *page)
  */
 void page_remove_rmap(struct page *page, bool compound)
 {
-	if (!PageAnon(page)) {
-		VM_BUG_ON_PAGE(compound && !PageHuge(page), page);
-		page_remove_file_rmap(page);
-		return;
-	}
+	if (!PageAnon(page))
+		return page_remove_file_rmap(page, compound);
 
 	if (compound)
 		return page_remove_anon_compound_rmap(page);