@@ -803,7 +803,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		return VM_FAULT_FALLBACK;
 	if (unlikely(anon_vma_prepare(vma)))
 		return VM_FAULT_OOM;
-	if (unlikely(khugepaged_enter(vma)))
+	if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
 		return VM_FAULT_OOM;
 	if (!(flags & FAULT_FLAG_WRITE) &&
 			transparent_hugepage_use_zero_page()) {
@@ -1970,7 +1970,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
 		 * register it here without waiting a page fault that
 		 * may not happen any time soon.
 		 */
-		if (unlikely(khugepaged_enter_vma_merge(vma)))
+		if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags)))
 			return -ENOMEM;
 		break;
 	case MADV_NOHUGEPAGE:
@@ -2071,7 +2071,8 @@ int __khugepaged_enter(struct mm_struct *mm)
 	return 0;
 }
 
-int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
+int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
+			       unsigned long vm_flags)
 {
 	unsigned long hstart, hend;
 	if (!vma->anon_vma)
@@ -2083,11 +2084,11 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
 	if (vma->vm_ops)
 		/* khugepaged not yet working on file or special mappings */
 		return 0;
-	VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
+	VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
 	hend = vma->vm_end & HPAGE_PMD_MASK;
 	if (hstart < hend)
-		return khugepaged_enter(vma);
+		return khugepaged_enter(vma, vm_flags);
 	return 0;
 }
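For context on the hunks above: the point of threading vm_flags through as an argument is that hugepage_madvise() runs before the VMA's flags are actually updated, so khugepaged_enter_vma_merge() must test the caller-supplied flags rather than the stale vma->vm_flags. On the header side, khugepaged_enter() is an inline wrapper in include/linux/khugepaged.h whose eligibility test would consult the new parameter the same way. A minimal sketch of that wrapper, assuming the helper names of that era's header (khugepaged_always(), khugepaged_req_madv(), __khugepaged_enter()), not shown in the hunks above:

/*
 * Sketch only: illustrates how the extra vm_flags argument would be
 * consulted in place of vma->vm_flags; helper names are assumptions
 * based on include/linux/khugepaged.h of the same period.
 */
static inline int khugepaged_enter(struct vm_area_struct *vma,
				   unsigned long vm_flags)
{
	/* Skip mms already registered with khugepaged. */
	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
		/* Eligibility is decided from the caller's flags. */
		if ((khugepaged_always() ||
		     (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
		    !(vm_flags & VM_NOHUGEPAGE))
			if (__khugepaged_enter(vma->vm_mm))
				return -ENOMEM;
	return 0;
}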