@@ -827,7 +827,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
 	}
-	if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
+	if (unlikely(mem_cgroup_charge_anon(page, mm, GFP_KERNEL))) {
 		put_page(page);
 		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
@@ -968,7 +968,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 					       __GFP_OTHER_NODE,
 					       vma, address, page_to_nid(page));
 		if (unlikely(!pages[i] ||
-			     mem_cgroup_newpage_charge(pages[i], mm,
+			     mem_cgroup_charge_anon(pages[i], mm,
 						       GFP_KERNEL))) {
 			if (pages[i])
 				put_page(pages[i]);
@@ -1101,7 +1101,7 @@ alloc:
 		goto out;
 	}
 
-	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
+	if (unlikely(mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL))) {
 		put_page(new_page);
 		if (page) {
 			split_huge_page(page);
@@ -2359,7 +2359,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	if (!new_page)
 		return;
 
-	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)))
+	if (unlikely(mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL)))
 		return;
 
 	/*