@@ -3407,12 +3407,13 @@ void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
 		memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
 		return;
 	}
-
+	/*
+	 * The page is freshly allocated and not visible to any
+	 * outside callers yet. Set up pc non-atomically.
+	 */
 	pc = lookup_page_cgroup(page);
-	lock_page_cgroup(pc);
 	pc->mem_cgroup = memcg;
-	SetPageCgroupUsed(pc);
-	unlock_page_cgroup(pc);
+	pc->flags = PCG_USED;
 }
 
 void __memcg_kmem_uncharge_pages(struct page *page, int order)
@@ -3422,19 +3423,11 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order)
 
 
 	pc = lookup_page_cgroup(page);
-	/*
-	 * Fast unlocked return. Theoretically might have changed, have to
-	 * check again after locking.
-	 */
 	if (!PageCgroupUsed(pc))
 		return;
 
-	lock_page_cgroup(pc);
-	if (PageCgroupUsed(pc)) {
-		memcg = pc->mem_cgroup;
-		ClearPageCgroupUsed(pc);
-	}
-	unlock_page_cgroup(pc);
+	memcg = pc->mem_cgroup;
+	pc->flags = 0;
 
 	/*
 	 * We trust that only if there is a memcg associated with the page, it