@@ -820,17 +820,19 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
 	}
-	count_vm_event(THP_FAULT_ALLOC);
 	if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
 		put_page(page);
+		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
 	}
 	if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page))) {
 		mem_cgroup_uncharge_page(page);
 		put_page(page);
+		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
 	}
 
+	count_vm_event(THP_FAULT_ALLOC);
 	return 0;
 }
 
@@ -1143,7 +1145,6 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		new_page = NULL;
 
 	if (unlikely(!new_page)) {
-		count_vm_event(THP_FAULT_FALLBACK);
 		if (is_huge_zero_pmd(orig_pmd)) {
 			ret = do_huge_pmd_wp_zero_page_fallback(mm, vma,
					address, pmd, orig_pmd, haddr);
@@ -1154,9 +1155,9 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 				split_huge_page(page);
 			put_page(page);
 		}
+		count_vm_event(THP_FAULT_FALLBACK);
 		goto out;
 	}
-	count_vm_event(THP_FAULT_ALLOC);
 
 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
 		put_page(new_page);
@@ -1164,10 +1165,13 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			split_huge_page(page);
 			put_page(page);
 		}
+		count_vm_event(THP_FAULT_FALLBACK);
 		ret |= VM_FAULT_OOM;
 		goto out;
 	}
 
+	count_vm_event(THP_FAULT_ALLOC);
+
 	if (is_huge_zero_pmd(orig_pmd))
 		clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
 	else