@@ -827,7 +827,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
 	}
-	if (unlikely(mem_cgroup_charge_anon(page, mm, GFP_KERNEL))) {
+	if (unlikely(mem_cgroup_charge_anon(page, mm, GFP_TRANSHUGE))) {
 		put_page(page);
 		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
@@ -1132,7 +1132,7 @@ alloc:
 		goto out;
 	}
 
-	if (unlikely(mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL))) {
+	if (unlikely(mem_cgroup_charge_anon(new_page, mm, GFP_TRANSHUGE))) {
 		put_page(new_page);
 		if (page) {
 			split_huge_page(page);
@@ -2399,7 +2399,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	if (!new_page)
 		return;
 
-	if (unlikely(mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL)))
+	if (unlikely(mem_cgroup_charge_anon(new_page, mm, GFP_TRANSHUGE)))
 		return;
 
 	/*