@@ -751,7 +751,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 
 	VM_BUG_ON_PAGE(!PageCompound(page), page);
 
-	if (mem_cgroup_try_charge(page, mm, gfp, &memcg)) {
+	if (mem_cgroup_try_charge(page, mm, gfp, &memcg, true)) {
 		put_page(page);
 		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
@@ -759,7 +759,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 
 	pgtable = pte_alloc_one(mm, haddr);
 	if (unlikely(!pgtable)) {
-		mem_cgroup_cancel_charge(page, memcg);
+		mem_cgroup_cancel_charge(page, memcg, true);
 		put_page(page);
 		return VM_FAULT_OOM;
 	}
@@ -775,7 +775,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 	ptl = pmd_lock(mm, pmd);
 	if (unlikely(!pmd_none(*pmd))) {
 		spin_unlock(ptl);
-		mem_cgroup_cancel_charge(page, memcg);
+		mem_cgroup_cancel_charge(page, memcg, true);
 		put_page(page);
 		pte_free(mm, pgtable);
 	} else {
@@ -786,7 +786,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 			int ret;
 
 			spin_unlock(ptl);
-			mem_cgroup_cancel_charge(page, memcg);
+			mem_cgroup_cancel_charge(page, memcg, true);
 			put_page(page);
 			pte_free(mm, pgtable);
 			ret = handle_userfault(vma, address, flags,
@@ -798,7 +798,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 		entry = mk_huge_pmd(page, vma->vm_page_prot);
 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 		page_add_new_anon_rmap(page, vma, haddr, true);
-		mem_cgroup_commit_charge(page, memcg, false);
+		mem_cgroup_commit_charge(page, memcg, false, true);
 		lru_cache_add_active_or_unevictable(page, vma);
 		pgtable_trans_huge_deposit(mm, pmd, pgtable);
 		set_pmd_at(mm, haddr, pmd, entry);
@@ -1095,13 +1095,14 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 					       vma, address, page_to_nid(page));
 		if (unlikely(!pages[i] ||
 			     mem_cgroup_try_charge(pages[i], mm, GFP_KERNEL,
-						   &memcg))) {
+						   &memcg, false))) {
 			if (pages[i])
 				put_page(pages[i]);
 			while (--i >= 0) {
 				memcg = (void *)page_private(pages[i]);
 				set_page_private(pages[i], 0);
-				mem_cgroup_cancel_charge(pages[i], memcg);
+				mem_cgroup_cancel_charge(pages[i], memcg,
+						false);
 				put_page(pages[i]);
 			}
 			kfree(pages);
@@ -1140,7 +1141,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 		memcg = (void *)page_private(pages[i]);
 		set_page_private(pages[i], 0);
 		page_add_new_anon_rmap(pages[i], vma, haddr, false);
-		mem_cgroup_commit_charge(pages[i], memcg, false);
+		mem_cgroup_commit_charge(pages[i], memcg, false, false);
 		lru_cache_add_active_or_unevictable(pages[i], vma);
 		pte = pte_offset_map(&_pmd, haddr);
 		VM_BUG_ON(!pte_none(*pte));
@@ -1168,7 +1169,7 @@ out_free_pages:
 	for (i = 0; i < HPAGE_PMD_NR; i++) {
 		memcg = (void *)page_private(pages[i]);
 		set_page_private(pages[i], 0);
-		mem_cgroup_cancel_charge(pages[i], memcg);
+		mem_cgroup_cancel_charge(pages[i], memcg, false);
 		put_page(pages[i]);
 	}
 	kfree(pages);
@@ -1234,7 +1235,8 @@ alloc:
 		goto out;
 	}
 
-	if (unlikely(mem_cgroup_try_charge(new_page, mm, huge_gfp, &memcg))) {
+	if (unlikely(mem_cgroup_try_charge(new_page, mm, huge_gfp, &memcg,
+					   true))) {
 		put_page(new_page);
 		if (page) {
 			split_huge_page(page);
@@ -1263,7 +1265,7 @@ alloc:
 		put_user_huge_page(page);
 	if (unlikely(!pmd_same(*pmd, orig_pmd))) {
 		spin_unlock(ptl);
-		mem_cgroup_cancel_charge(new_page, memcg);
+		mem_cgroup_cancel_charge(new_page, memcg, true);
 		put_page(new_page);
 		goto out_mn;
 	} else {
@@ -1272,7 +1274,7 @@ alloc:
 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 		pmdp_huge_clear_flush_notify(vma, haddr, pmd);
 		page_add_new_anon_rmap(new_page, vma, haddr, true);
-		mem_cgroup_commit_charge(new_page, memcg, false);
+		mem_cgroup_commit_charge(new_page, memcg, false, true);
 		lru_cache_add_active_or_unevictable(new_page, vma);
 		set_pmd_at(mm, haddr, pmd, entry);
 		update_mmu_cache_pmd(vma, address, pmd);
@@ -2583,7 +2585,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 		goto out_nolock;
 	}
 
-	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg))) {
+	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
 		result = SCAN_CGROUP_CHARGE_FAIL;
 		goto out_nolock;
 	}
@@ -2683,7 +2685,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	spin_lock(pmd_ptl);
 	BUG_ON(!pmd_none(*pmd));
 	page_add_new_anon_rmap(new_page, vma, address, true);
-	mem_cgroup_commit_charge(new_page, memcg, false);
+	mem_cgroup_commit_charge(new_page, memcg, false, true);
 	lru_cache_add_active_or_unevictable(new_page, vma);
 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
 	set_pmd_at(mm, address, pmd, _pmd);
@@ -2703,7 +2705,7 @@ out_nolock:
 	trace_mm_collapse_huge_page(mm, isolated, result);
 	return;
 out:
-	mem_cgroup_cancel_charge(new_page, memcg);
+	mem_cgroup_cancel_charge(new_page, memcg, true);
 	goto out_up_write;
 }
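
Reviewer note (not part of the patch): every call site touched above follows the same three-step memcg protocol — mem_cgroup_try_charge() reserves the charge, then either mem_cgroup_commit_charge() finalizes it once the page is mapped, or mem_cgroup_cancel_charge() backs it out on a failure path. The new boolean tells memcg whether the page is a compound THP (true, charged as HPAGE_PMD_NR pages) or a single base page (false, as in the do_huge_pmd_wp_page_fallback() hunks, which operate on the split-out 4K pages). The sketch below is a minimal illustration of that protocol using only the signatures visible in the hunks; install_page_somehow() is a hypothetical placeholder, not a kernel function.

/*
 * Illustrative sketch only. The memcg charge protocol as used in the
 * hunks above: try_charge reserves, commit_charge finalizes,
 * cancel_charge backs out. @compound says whether @page is a THP head
 * (charge HPAGE_PMD_NR pages at once) or a single base page.
 */
static int charge_pattern_example(struct page *page, struct mm_struct *mm,
				  gfp_t gfp, bool compound)
{
	struct mem_cgroup *memcg;

	/* Step 1: reserve the charge against mm's memcg. */
	if (mem_cgroup_try_charge(page, mm, gfp, &memcg, compound))
		return -ENOMEM;

	/* Step 2: try to actually install the page (hypothetical helper). */
	if (install_page_somehow(page)) {
		/* Failure: release the reservation before giving up. */
		mem_cgroup_cancel_charge(page, memcg, compound);
		return -EBUSY;
	}

	/*
	 * Step 3: the page is mapped; finalize the charge. lrucare is
	 * false here because a freshly allocated page is not yet on
	 * the LRU, matching the call sites in this patch.
	 */
	mem_cgroup_commit_charge(page, memcg, false, compound);
	return 0;
}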