@@ -4531,9 +4531,8 @@ static int mem_cgroup_move_account(struct page *page,
 		goto out;
 
 	/*
-	 * Prevent mem_cgroup_migrate() from looking at page->mem_cgroup
-	 * of its source page while we change it: page migration takes
-	 * both pages off the LRU, but page cache replacement doesn't.
+	 * Prevent mem_cgroup_replace_page() from looking at
+	 * page->mem_cgroup of its source page while we change it.
 	 */
 	if (!trylock_page(page))
 		goto out;
@@ -5495,7 +5494,7 @@ void mem_cgroup_uncharge_list(struct list_head *page_list)
 }
 
 /**
- * mem_cgroup_migrate - migrate a charge to another page
+ * mem_cgroup_replace_page - migrate a charge to another page
  * @oldpage: currently charged page
  * @newpage: page to transfer the charge to
  * @lrucare: either or both pages might be on the LRU already
@@ -5504,16 +5503,13 @@ void mem_cgroup_uncharge_list(struct list_head *page_list)
  *
  * Both pages must be locked, @newpage->mapping must be set up.
  */
-void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
-			bool lrucare)
+void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage)
 {
 	struct mem_cgroup *memcg;
 	int isolated;
 
 	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
 	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
-	VM_BUG_ON_PAGE(!lrucare && PageLRU(oldpage), oldpage);
-	VM_BUG_ON_PAGE(!lrucare && PageLRU(newpage), newpage);
 	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
 	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
 		       newpage);
@@ -5525,25 +5521,16 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
 	if (newpage->mem_cgroup)
 		return;
 
-	/*
-	 * Swapcache readahead pages can get migrated before being
-	 * charged, and migration from compaction can happen to an
-	 * uncharged page when the PFN walker finds a page that
-	 * reclaim just put back on the LRU but has not released yet.
-	 */
+	/* Swapcache readahead pages can get replaced before being charged */
 	memcg = oldpage->mem_cgroup;
 	if (!memcg)
 		return;
 
-	if (lrucare)
-		lock_page_lru(oldpage, &isolated);
-
+	lock_page_lru(oldpage, &isolated);
 	oldpage->mem_cgroup = NULL;
+	unlock_page_lru(oldpage, isolated);
 
-	if (lrucare)
-		unlock_page_lru(oldpage, isolated);
-
-	commit_charge(newpage, memcg, lrucare);
+	commit_charge(newpage, memcg, true);
 }
 
 /*
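
Caller-side sketch (illustration only, not part of the hunks above): with the lrucare parameter dropped, a page cache replacement path that previously had to pass lrucare=true now just calls the renamed helper. The wrapper function name below is hypothetical; only the two call signatures are taken from this patch.

/* Hypothetical caller in a page-cache-replacement path (sketch, not kernel code). */
static void charge_replacement_sketch(struct page *old, struct page *new)
{
	/*
	 * Before this patch, either page could still be on the LRU at this
	 * point, so the caller had to request LRU care explicitly:
	 *
	 *	mem_cgroup_migrate(old, new, true);
	 *
	 * After this patch the helper always isolates @old from the LRU
	 * while clearing old->mem_cgroup, so the flag is gone:
	 */
	mem_cgroup_replace_page(old, new);
}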