@@ -1054,6 +1054,36 @@ void page_add_file_rmap(struct page *page)
 	mem_cgroup_end_page_stat(memcg, locked, flags);
 }
 
+static void page_remove_file_rmap(struct page *page)
+{
+	struct mem_cgroup *memcg;
+	unsigned long flags;
+	bool locked;
+
+	memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
+
+	/* page still mapped by someone else? */
+	if (!atomic_add_negative(-1, &page->_mapcount))
+		goto out;
+
+	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
+	if (unlikely(PageHuge(page)))
+		goto out;
+
+	/*
+	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
+	 * these counters are not modified in interrupt context, and
+	 * pte lock(a spinlock) is held, which implies preemption disabled.
+	 */
+	__dec_zone_page_state(page, NR_FILE_MAPPED);
+	mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
+
+	if (unlikely(PageMlocked(page)))
+		clear_page_mlock(page);
+out:
+	mem_cgroup_end_page_stat(memcg, locked, flags);
+}
+
 /**
  * page_remove_rmap - take down pte mapping from a page
  * @page: page to remove mapping from
@@ -1062,46 +1092,33 @@ void page_add_file_rmap(struct page *page)
  */
 void page_remove_rmap(struct page *page)
 {
-	struct mem_cgroup *uninitialized_var(memcg);
-	bool anon = PageAnon(page);
-	unsigned long flags;
-	bool locked;
-
-	/*
-	 * The anon case has no mem_cgroup page_stat to update; but may
-	 * uncharge_page() below, where the lock ordering can deadlock if
-	 * we hold the lock against page_stat move: so avoid it on anon.
-	 */
-	if (!anon)
-		memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
+	if (!PageAnon(page)) {
+		page_remove_file_rmap(page);
+		return;
+	}
 
 	/* page still mapped by someone else? */
 	if (!atomic_add_negative(-1, &page->_mapcount))
-		goto out;
+		return;
+
+	/* Hugepages are not counted in NR_ANON_PAGES for now. */
+	if (unlikely(PageHuge(page)))
+		return;
 
 	/*
-	 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
-	 * and not charged by memcg for now.
-	 *
 	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
 	 * these counters are not modified in interrupt context, and
-	 * these counters are not modified in interrupt context, and
 	 * pte lock(a spinlock) is held, which implies preemption disabled.
 	 */
-	if (unlikely(PageHuge(page)))
-		goto out;
-	if (anon) {
-		if (PageTransHuge(page))
-			__dec_zone_page_state(page,
-					      NR_ANON_TRANSPARENT_HUGEPAGES);
-		__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
-				-hpage_nr_pages(page));
-	} else {
-		__dec_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
-	}
+	if (PageTransHuge(page))
+		__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+
+	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
+			      -hpage_nr_pages(page));
+
 	if (unlikely(PageMlocked(page)))
 		clear_page_mlock(page);
+
 	/*
 	 * It would be tidy to reset the PageAnon mapping here,
 	 * but that might overwrite a racing page_add_anon_rmap
@@ -1111,9 +1128,6 @@ void page_remove_rmap(struct page *page)
 	 * Leaving it set also helps swapoff to reinstate ptes
 	 * faster for those pages still in swapcache.
 	 */
-out:
-	if (!anon)
-		mem_cgroup_end_page_stat(memcg, locked, flags);
 }
 
 /*