@@ -1262,9 +1262,13 @@ out:
 }
 
 /**
- * mem_cgroup_page_lruvec - return lruvec for adding an lru page
+ * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
  * @page: the page
  * @zone: zone of the page
+ *
+ * This function is only safe when following the LRU page isolation
+ * and putback protocol: the LRU lock must be held, and the page must
+ * either be PageLRU() or the caller must have isolated/allocated it.
  */
 struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
 {
@@ -1282,13 +1286,9 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
 	memcg = pc->mem_cgroup;
 
 	/*
-	 * Surreptitiously switch any uncharged offlist page to root:
-	 * an uncharged page off lru does nothing to secure
-	 * its former mem_cgroup from sudden removal.
-	 *
-	 * Our caller holds lru_lock, and PageCgroupUsed is updated
-	 * under page_cgroup lock: between them, they make all uses
-	 * of pc->mem_cgroup safe.
+	 * Swapcache readahead pages are added to the LRU - and
+	 * possibly migrated - before they are charged.  Ensure
+	 * pc->mem_cgroup is sane.
 	 */
 	if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
 		pc->mem_cgroup = memcg = root_mem_cgroup;