@@ -1148,10 +1148,15 @@ void page_add_new_anon_rmap(struct page *page,
  */
 void page_add_file_rmap(struct page *page)
 {
+	bool locked;
+	unsigned long flags;
+
+	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 	if (atomic_inc_and_test(&page->_mapcount)) {
 		__inc_zone_page_state(page, NR_FILE_MAPPED);
 		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 	}
+	mem_cgroup_end_update_page_stat(page, &locked, &flags);
 }
 
 /**
@@ -1162,9 +1167,21 @@ void page_add_file_rmap(struct page *page)
  */
 void page_remove_rmap(struct page *page)
 {
+	bool anon = PageAnon(page);
+	bool locked;
+	unsigned long flags;
+
+	/*
+	 * The anon case has no mem_cgroup page_stat to update; but may
+	 * uncharge_page() below, where the lock ordering can deadlock if
+	 * we hold the lock against page_stat move: so avoid it on anon.
+	 */
+	if (!anon)
+		mem_cgroup_begin_update_page_stat(page, &locked, &flags);
+
 	/* page still mapped by someone else? */
 	if (!atomic_add_negative(-1, &page->_mapcount))
-		return;
+		goto out;
 
 	/*
 	 * Now that the last pte has gone, s390 must transfer dirty
@@ -1173,7 +1190,7 @@ void page_remove_rmap(struct page *page)
 	 * not if it's in swapcache - there might be another pte slot
 	 * containing the swap entry, but page not yet written to swap.
 	 */
-	if ((!PageAnon(page) || PageSwapCache(page)) &&
+	if ((!anon || PageSwapCache(page)) &&
 	    page_test_and_clear_dirty(page_to_pfn(page), 1))
 		set_page_dirty(page);
 	/*
@@ -1181,8 +1198,8 @@ void page_remove_rmap(struct page *page)
 	 * and not charged by memcg for now.
 	 */
 	if (unlikely(PageHuge(page)))
-		return;
-	if (PageAnon(page)) {
+		goto out;
+	if (anon) {
 		mem_cgroup_uncharge_page(page);
 		if (!PageTransHuge(page))
 			__dec_zone_page_state(page, NR_ANON_PAGES);
@@ -1202,6 +1219,9 @@ void page_remove_rmap(struct page *page)
 	 * Leaving it set also helps swapoff to reinstate ptes
 	 * faster for those pages still in swapcache.
 	 */
+out:
+	if (!anon)
+		mem_cgroup_end_update_page_stat(page, &locked, &flags);
 }
 
 /*