@@ -2551,17 +2551,8 @@ static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
 	return NOTIFY_OK;
 }
 
-/**
- * mem_cgroup_try_charge - try charging a memcg
- * @memcg: memcg to charge
- * @nr_pages: number of pages to charge
- *
- * Returns 0 if @memcg was charged successfully, -EINTR if the charge
- * was bypassed to root_mem_cgroup, and -ENOMEM if the charge failed.
- */
-static int mem_cgroup_try_charge(struct mem_cgroup *memcg,
-				 gfp_t gfp_mask,
-				 unsigned int nr_pages)
+static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
+		      unsigned int nr_pages)
 {
 	unsigned int batch = max(CHARGE_BATCH, nr_pages);
 	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
@@ -2660,41 +2651,7 @@ done:
 	return ret;
 }
 
-/**
- * mem_cgroup_try_charge_mm - try charging a mm
- * @mm: mm_struct to charge
- * @nr_pages: number of pages to charge
- * @oom: trigger OOM if reclaim fails
- *
- * Returns the charged mem_cgroup associated with the given mm_struct or
- * NULL the charge failed.
- */
-static struct mem_cgroup *mem_cgroup_try_charge_mm(struct mm_struct *mm,
-				 gfp_t gfp_mask,
-				 unsigned int nr_pages)
-
-{
-	struct mem_cgroup *memcg;
-	int ret;
-
-	memcg = get_mem_cgroup_from_mm(mm);
-	ret = mem_cgroup_try_charge(memcg, gfp_mask, nr_pages);
-	css_put(&memcg->css);
-	if (ret == -EINTR)
-		memcg = root_mem_cgroup;
-	else if (ret)
-		memcg = NULL;
-
-	return memcg;
-}
-
-/*
- * Somemtimes we have to undo a charge we got by try_charge().
- * This function is for that and do uncharge, put css's refcnt.
- * gotten by try_charge().
- */
-static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
-				       unsigned int nr_pages)
+static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
 	unsigned long bytes = nr_pages * PAGE_SIZE;
 
@@ -2760,17 +2717,13 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
 	return memcg;
 }
 
-static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
-				       struct page *page,
-				       unsigned int nr_pages,
-				       enum charge_type ctype,
-				       bool lrucare)
+static void commit_charge(struct page *page, struct mem_cgroup *memcg,
+			  unsigned int nr_pages, bool anon, bool lrucare)
 {
 	struct page_cgroup *pc = lookup_page_cgroup(page);
 	struct zone *uninitialized_var(zone);
 	struct lruvec *lruvec;
 	bool was_on_lru = false;
-	bool anon;
 
 	lock_page_cgroup(pc);
 	VM_BUG_ON_PAGE(PageCgroupUsed(pc), page);
@@ -2807,11 +2760,6 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 		spin_unlock_irq(&zone->lru_lock);
 	}
 
-	if (ctype == MEM_CGROUP_CHARGE_TYPE_ANON)
-		anon = true;
-	else
-		anon = false;
-
 	mem_cgroup_charge_statistics(memcg, page, anon, nr_pages);
 	unlock_page_cgroup(pc);
 
@@ -2882,21 +2830,21 @@ static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
 	if (ret)
 		return ret;
 
-	ret = mem_cgroup_try_charge(memcg, gfp, size >> PAGE_SHIFT);
+	ret = try_charge(memcg, gfp, size >> PAGE_SHIFT);
 	if (ret == -EINTR) {
 		/*
-		 * mem_cgroup_try_charge() chosed to bypass to root due to
-		 * OOM kill or fatal signal. Since our only options are to
-		 * either fail the allocation or charge it to this cgroup, do
-		 * it as a temporary condition. But we can't fail. From a
-		 * kmem/slab perspective, the cache has already been selected,
-		 * by mem_cgroup_kmem_get_cache(), so it is too late to change
+		 * try_charge() chose to bypass to root due to OOM kill or
+		 * fatal signal. Since our only options are to either fail
+		 * the allocation or charge it to this cgroup, do it as a
+		 * temporary condition. But we can't fail. From a kmem/slab
+		 * perspective, the cache has already been selected, by
+		 * mem_cgroup_kmem_get_cache(), so it is too late to change
 		 * our minds.
 		 *
 		 * This condition will only trigger if the task entered
-		 * memcg_charge_kmem in a sane state, but was OOM-killed during
-		 * mem_cgroup_try_charge() above. Tasks that were already
-		 * dying when the allocation triggers should have been already
+		 * memcg_charge_kmem in a sane state, but was OOM-killed
+		 * during try_charge() above. Tasks that were already dying
+		 * when the allocation triggers should have been already
 		 * directed to the root cgroup in memcontrol.h
 		 */
 		res_counter_charge_nofail(&memcg->res, size, &fail_res);
@@ -3618,164 +3566,6 @@ out:
 	return ret;
 }
 
-int mem_cgroup_charge_anon(struct page *page,
-			   struct mm_struct *mm, gfp_t gfp_mask)
-{
-	unsigned int nr_pages = 1;
-	struct mem_cgroup *memcg;
-
-	if (mem_cgroup_disabled())
-		return 0;
-
-	VM_BUG_ON_PAGE(page_mapped(page), page);
-	VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page);
-	VM_BUG_ON(!mm);
-
-	if (PageTransHuge(page)) {
-		nr_pages <<= compound_order(page);
-		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
-	}
-
-	memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, nr_pages);
-	if (!memcg)
-		return -ENOMEM;
-	__mem_cgroup_commit_charge(memcg, page, nr_pages,
-				   MEM_CGROUP_CHARGE_TYPE_ANON, false);
-	return 0;
-}
-
-/*
- * While swap-in, try_charge -> commit or cancel, the page is locked.
- * And when try_charge() successfully returns, one refcnt to memcg without
- * struct page_cgroup is acquired. This refcnt will be consumed by
- * "commit()" or removed by "cancel()"
- */
-static int __mem_cgroup_try_charge_swapin(struct mm_struct *mm,
-					  struct page *page,
-					  gfp_t mask,
-					  struct mem_cgroup **memcgp)
-{
-	struct mem_cgroup *memcg = NULL;
-	struct page_cgroup *pc;
-	int ret;
-
-	pc = lookup_page_cgroup(page);
-	/*
-	 * Every swap fault against a single page tries to charge the
-	 * page, bail as early as possible. shmem_unuse() encounters
-	 * already charged pages, too. The USED bit is protected by
-	 * the page lock, which serializes swap cache removal, which
-	 * in turn serializes uncharging.
-	 */
-	if (PageCgroupUsed(pc))
-		goto out;
-	if (do_swap_account)
-		memcg = try_get_mem_cgroup_from_page(page);
-	if (!memcg)
-		memcg = get_mem_cgroup_from_mm(mm);
-	ret = mem_cgroup_try_charge(memcg, mask, 1);
-	css_put(&memcg->css);
-	if (ret == -EINTR)
-		memcg = root_mem_cgroup;
-	else if (ret)
-		return ret;
-out:
-	*memcgp = memcg;
-	return 0;
-}
-
-int mem_cgroup_try_charge_swapin(struct mm_struct *mm, struct page *page,
-				 gfp_t gfp_mask, struct mem_cgroup **memcgp)
-{
-	if (mem_cgroup_disabled()) {
-		*memcgp = NULL;
-		return 0;
-	}
-	/*
-	 * A racing thread's fault, or swapoff, may have already
-	 * updated the pte, and even removed page from swap cache: in
-	 * those cases unuse_pte()'s pte_same() test will fail; but
-	 * there's also a KSM case which does need to charge the page.
-	 */
-	if (!PageSwapCache(page)) {
-		struct mem_cgroup *memcg;
-
-		memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1);
-		if (!memcg)
-			return -ENOMEM;
-		*memcgp = memcg;
-		return 0;
-	}
-	return __mem_cgroup_try_charge_swapin(mm, page, gfp_mask, memcgp);
-}
-
-void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
-{
-	if (mem_cgroup_disabled())
-		return;
-	if (!memcg)
-		return;
-	__mem_cgroup_cancel_charge(memcg, 1);
-}
-
-static void
-__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
-				  enum charge_type ctype)
-{
-	if (mem_cgroup_disabled())
-		return;
-	if (!memcg)
-		return;
-
-	__mem_cgroup_commit_charge(memcg, page, 1, ctype, true);
-	/*
-	 * Now swap is on-memory. This means this page may be
-	 * counted both as mem and swap....double count.
-	 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
-	 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
-	 * may call delete_from_swap_cache() before reach here.
-	 */
-	if (do_swap_account && PageSwapCache(page)) {
-		swp_entry_t ent = {.val = page_private(page)};
-		mem_cgroup_uncharge_swap(ent);
-	}
-}
-
-void mem_cgroup_commit_charge_swapin(struct page *page,
-				     struct mem_cgroup *memcg)
-{
-	__mem_cgroup_commit_charge_swapin(page, memcg,
-					  MEM_CGROUP_CHARGE_TYPE_ANON);
-}
-
-int mem_cgroup_charge_file(struct page *page, struct mm_struct *mm,
-			   gfp_t gfp_mask)
-{
-	enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
-	struct mem_cgroup *memcg;
-	int ret;
-
-	if (mem_cgroup_disabled())
-		return 0;
-	if (PageCompound(page))
-		return 0;
-
-	if (PageSwapCache(page)) { /* shmem */
-		ret = __mem_cgroup_try_charge_swapin(mm, page,
-						     gfp_mask, &memcg);
-		if (ret)
-			return ret;
-		__mem_cgroup_commit_charge_swapin(page, memcg, type);
-		return 0;
-	}
-
-	memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1);
-	if (!memcg)
-		return -ENOMEM;
-	__mem_cgroup_commit_charge(memcg, page, 1, type, false);
-	return 0;
-}
-
 static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg,
 				   unsigned int nr_pages,
 				   const enum charge_type ctype)
@@ -4122,7 +3912,6 @@ void mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
 	struct mem_cgroup *memcg = NULL;
 	unsigned int nr_pages = 1;
 	struct page_cgroup *pc;
-	enum charge_type ctype;
 
 	*memcgp = NULL;
 
@@ -4184,16 +3973,12 @@ void mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
 	 * page. In the case new page is migrated but not remapped, new page's
 	 * mapcount will be finally 0 and we call uncharge in end_migration().
 	 */
-	if (PageAnon(page))
-		ctype = MEM_CGROUP_CHARGE_TYPE_ANON;
-	else
-		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
 	/*
 	 * The page is committed to the memcg, but it's not actually
 	 * charged to the res_counter since we plan on replacing the
 	 * old one and only one page is going to be left afterwards.
 	 */
-	__mem_cgroup_commit_charge(memcg, newpage, nr_pages, ctype, false);
+	commit_charge(newpage, memcg, nr_pages, PageAnon(page), false);
 }
 
 /* remove redundant charge if migration failed*/
@@ -4252,7 +4037,6 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
 {
 	struct mem_cgroup *memcg = NULL;
 	struct page_cgroup *pc;
-	enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
 
 	if (mem_cgroup_disabled())
 		return;
@@ -4278,7 +4062,7 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
 	 * the newpage may be on LRU(or pagevec for LRU) already. We lock
 	 * LRU while we overwrite pc->mem_cgroup.
 	 */
-	__mem_cgroup_commit_charge(memcg, newpage, 1, type, true);
+	commit_charge(newpage, memcg, 1, false, true);
 }
 
 #ifdef CONFIG_DEBUG_VM
@@ -6319,20 +6103,19 @@ static int mem_cgroup_do_precharge(unsigned long count)
 	int ret;
 
 	/* Try a single bulk charge without reclaim first */
-	ret = mem_cgroup_try_charge(mc.to, GFP_KERNEL & ~__GFP_WAIT, count);
+	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_WAIT, count);
 	if (!ret) {
 		mc.precharge += count;
 		return ret;
 	}
 	if (ret == -EINTR) {
-		__mem_cgroup_cancel_charge(root_mem_cgroup, count);
+		cancel_charge(root_mem_cgroup, count);
 		return ret;
 	}
 
 	/* Try charges one by one with reclaim */
 	while (count--) {
-		ret = mem_cgroup_try_charge(mc.to,
-					    GFP_KERNEL & ~__GFP_NORETRY, 1);
+		ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
 		/*
 		 * In case of failure, any residual charges against
 		 * mc.to will be dropped by mem_cgroup_clear_mc()
@@ -6340,7 +6123,7 @@ static int mem_cgroup_do_precharge(unsigned long count)
 		 * bypassed to root right away or they'll be lost.
 		 */
 		if (ret == -EINTR)
-			__mem_cgroup_cancel_charge(root_mem_cgroup, 1);
+			cancel_charge(root_mem_cgroup, 1);
 		if (ret)
 			return ret;
 		mc.precharge++;
@@ -6609,7 +6392,7 @@ static void __mem_cgroup_clear_mc(void)
 
 	/* we must uncharge all the leftover precharges from mc.to */
 	if (mc.precharge) {
-		__mem_cgroup_cancel_charge(mc.to, mc.precharge);
+		cancel_charge(mc.to, mc.precharge);
 		mc.precharge = 0;
 	}
 	/*
@@ -6617,7 +6400,7 @@ static void __mem_cgroup_clear_mc(void)
 	 * we must uncharge here.
 	 */
 	if (mc.moved_charge) {
-		__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
+		cancel_charge(mc.from, mc.moved_charge);
 		mc.moved_charge = 0;
 	}
 	/* we must fixup refcnts and charges */
@@ -6946,6 +6729,150 @@ static void __init enable_swap_cgroup(void)
 }
 #endif
 
+/**
+ * mem_cgroup_try_charge - try charging a page
+ * @page: page to charge
+ * @mm: mm context of the victim
+ * @gfp_mask: reclaim mode
+ * @memcgp: charged memcg return
+ *
+ * Try to charge @page to the memcg that @mm belongs to, reclaiming
+ * pages according to @gfp_mask if necessary.
+ *
+ * Returns 0 on success, with *@memcgp pointing to the charged memcg.
+ * Otherwise, an error code is returned.
+ *
+ * After page->mapping has been set up, the caller must finalize the
+ * charge with mem_cgroup_commit_charge(). Or abort the transaction
+ * with mem_cgroup_cancel_charge() in case page instantiation fails.
+ */
+int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
+			  gfp_t gfp_mask, struct mem_cgroup **memcgp)
+{
+	struct mem_cgroup *memcg = NULL;
+	unsigned int nr_pages = 1;
+	int ret = 0;
+
+	if (mem_cgroup_disabled())
+		goto out;
+
+	if (PageSwapCache(page)) {
+		struct page_cgroup *pc = lookup_page_cgroup(page);
+		/*
+		 * Every swap fault against a single page tries to charge the
+		 * page, bail as early as possible. shmem_unuse() encounters
+		 * already charged pages, too. The USED bit is protected by
+		 * the page lock, which serializes swap cache removal, which
+		 * in turn serializes uncharging.
+		 */
+		if (PageCgroupUsed(pc))
+			goto out;
+	}
+
+	if (PageTransHuge(page)) {
+		nr_pages <<= compound_order(page);
+		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
+	}
+
+	if (do_swap_account && PageSwapCache(page))
+		memcg = try_get_mem_cgroup_from_page(page);
+	if (!memcg)
+		memcg = get_mem_cgroup_from_mm(mm);
+
+	ret = try_charge(memcg, gfp_mask, nr_pages);
+
+	css_put(&memcg->css);
+
+	if (ret == -EINTR) {
+		memcg = root_mem_cgroup;
+		ret = 0;
+	}
+out:
+	*memcgp = memcg;
+	return ret;
+}
+
+/**
+ * mem_cgroup_commit_charge - commit a page charge
+ * @page: page to charge
+ * @memcg: memcg to charge the page to
+ * @lrucare: page might be on LRU already
+ *
+ * Finalize a charge transaction started by mem_cgroup_try_charge(),
+ * after page->mapping has been set up. This must happen atomically
+ * as part of the page instantiation, i.e. under the page table lock
+ * for anonymous pages, under the page lock for page and swap cache.
+ *
+ * In addition, the page must not be on the LRU during the commit, to
+ * prevent racing with task migration. If it might be, use @lrucare.
+ *
+ * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
+ */
+void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
+			      bool lrucare)
+{
+	unsigned int nr_pages = 1;
+
+	VM_BUG_ON_PAGE(!page->mapping, page);
+	VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
+
+	if (mem_cgroup_disabled())
+		return;
+	/*
+	 * Swap faults will attempt to charge the same page multiple
+	 * times. But reuse_swap_page() might have removed the page
+	 * from swapcache already, so we can't check PageSwapCache().
+	 */
+	if (!memcg)
+		return;
+
+	if (PageTransHuge(page)) {
+		nr_pages <<= compound_order(page);
+		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
+	}
+
+	commit_charge(page, memcg, nr_pages, PageAnon(page), lrucare);
+
+	if (do_swap_account && PageSwapCache(page)) {
+		swp_entry_t entry = { .val = page_private(page) };
+		/*
+		 * The swap entry might not get freed for a long time,
+		 * let's not wait for it. The page already received a
+		 * memory+swap charge, drop the swap entry duplicate.
+		 */
+		mem_cgroup_uncharge_swap(entry);
+	}
+}
+
+/**
+ * mem_cgroup_cancel_charge - cancel a page charge
+ * @page: page to charge
+ * @memcg: memcg to charge the page to
+ *
+ * Cancel a charge transaction started by mem_cgroup_try_charge().
+ */
+void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg)
+{
+	unsigned int nr_pages = 1;
+
+	if (mem_cgroup_disabled())
+		return;
+	/*
+	 * Swap faults will attempt to charge the same page multiple
+	 * times. But reuse_swap_page() might have removed the page
+	 * from swapcache already, so we can't check PageSwapCache().
+	 */
+	if (!memcg)
+		return;
+
+	if (PageTransHuge(page)) {
+		nr_pages <<= compound_order(page);
+		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
+	}
+
+	cancel_charge(memcg, nr_pages);
+}
+
 /*
  * subsys_initcall() for memory controller.
  *