@@ -995,7 +995,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 
 	ret = -EAGAIN;
 	pmd = *src_pmd;
-	if (unlikely(!pmd_trans_huge(pmd))) {
+	if (unlikely(!pmd_trans_huge(pmd) && !pmd_devmap(pmd))) {
 		pte_free(dst_mm, pgtable);
 		goto out_unlock;
 	}
@@ -1018,17 +1018,20 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		goto out_unlock;
 	}
 
-	src_page = pmd_page(pmd);
-	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
-	get_page(src_page);
-	page_dup_rmap(src_page, true);
-	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
+	if (pmd_trans_huge(pmd)) {
+		/* thp accounting separate from pmd_devmap accounting */
+		src_page = pmd_page(pmd);
+		VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
+		get_page(src_page);
+		page_dup_rmap(src_page, true);
+		add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
+		atomic_long_inc(&dst_mm->nr_ptes);
+		pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
+	}
 
 	pmdp_set_wrprotect(src_mm, addr, src_pmd);
 	pmd = pmd_mkold(pmd_wrprotect(pmd));
-	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
 	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
-	atomic_long_inc(&dst_mm->nr_ptes);
 
 	ret = 0;
 out_unlock:
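
These two hunks teach copy_huge_pmd(), the fork-time copier of huge pmds, about pmd_devmap() entries: the entry guard is widened so device-backed huge pmds are copied at all, and the THP-only bookkeeping (page refcount, rmap duplication, the MM_ANONPAGES counter, nr_ptes and the deposited pagetable) moves under a pmd_trans_huge() check, since a devmap pmd has no anonymous huge page behind it. Below is a minimal userspace sketch of that control flow; the flag bits, the pmd_t typedef and the printf placeholders are invented for illustration, and only the branch structure follows the patch.

/*
 * Toy model of the widened copy_huge_pmd() guard.  Everything here is
 * a stand-in for the real kernel types and accessors.
 */
#include <stdbool.h>
#include <stdio.h>

#define PMD_TRANS_HUGE (1u << 0)	/* stand-in flag: anonymous THP */
#define PMD_DEVMAP     (1u << 1)	/* stand-in flag: device-backed */

typedef unsigned int pmd_t;		/* toy pmd: just a flags word */

static bool pmd_trans_huge(pmd_t pmd) { return pmd & PMD_TRANS_HUGE; }
static bool pmd_devmap(pmd_t pmd)     { return pmd & PMD_DEVMAP; }

static int copy_huge_pmd_model(pmd_t pmd)
{
	/* Pre-patch, this guard rejected everything but THP. */
	if (!pmd_trans_huge(pmd) && !pmd_devmap(pmd))
		return -1;		/* the -EAGAIN fallback path */

	if (pmd_trans_huge(pmd)) {
		/* thp accounting separate from pmd_devmap accounting */
		printf("  take page ref, dup rmap, bump counters\n");
	}

	/* common tail: wrprotect + mkold, then install in the child */
	printf("  install copied pmd %#x\n", pmd);
	return 0;
}

int main(void)
{
	printf("thp:\n");     copy_huge_pmd_model(PMD_TRANS_HUGE);
	printf("devmap:\n");  copy_huge_pmd_model(PMD_DEVMAP);
	printf("neither:\n"); copy_huge_pmd_model(0);	/* bails out */
	return 0;
}

Note that the -EAGAIN fallback for pmds that are neither kind of huge is untouched; only the definition of "huge" has grown.
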
@@ -1716,7 +1719,7 @@ bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
 		spinlock_t **ptl)
 {
 	*ptl = pmd_lock(vma->vm_mm, pmd);
-	if (likely(pmd_trans_huge(*pmd)))
+	if (likely(pmd_trans_huge(*pmd) || pmd_devmap(*pmd)))
 		return true;
 	spin_unlock(*ptl);
 	return false;
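
__pmd_trans_huge_lock() now reports "huge" for devmap pmds as well, so every caller that uses it as a gate picks up devmap handling for free. A runnable sketch of the take-the-lock-or-bail contract, with a pthread mutex standing in for the pmd spinlock and an invented struct in place of the real page-table entry:

/*
 * Sketch of the __pmd_trans_huge_lock() contract: on true the lock is
 * held and the pmd is known huge, on false the lock has already been
 * dropped.  Types and names here are stand-ins, not kernel API.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct pmd_model {
	bool trans_huge;
	bool devmap;
	pthread_mutex_t ptl;		/* stand-in for the pmd lock */
};

static bool pmd_trans_huge_lock_model(struct pmd_model *pmd)
{
	pthread_mutex_lock(&pmd->ptl);
	/* post-patch: a devmap pmd counts as huge, not only THP */
	if (pmd->trans_huge || pmd->devmap)
		return true;		/* caller now owns ptl */
	pthread_mutex_unlock(&pmd->ptl);
	return false;			/* lock already released */
}

int main(void)
{
	struct pmd_model pmd = {
		.devmap = true,
		.ptl = PTHREAD_MUTEX_INITIALIZER,
	};

	if (pmd_trans_huge_lock_model(&pmd)) {
		printf("huge pmd: operate under the lock\n");
		pthread_mutex_unlock(&pmd.ptl);
	} else {
		printf("not huge: fall back to the pte level\n");
	}
	return 0;
}
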
@@ -2788,7 +2791,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 	VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
 	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
 	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
-	VM_BUG_ON(!pmd_trans_huge(*pmd));
+	VM_BUG_ON(!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd));
 
 	count_vm_event(THP_SPLIT_PMD);
 
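
Since VM_BUG_ON(x) fires when x is true (under CONFIG_DEBUG_VM), the widened assertion reads, by De Morgan, as "this pmd is THP or devmap". A tiny userspace analogue of the same check:

/*
 * Userspace analogue of the widened assertion:
 * VM_BUG_ON(!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)) is equivalent
 * to asserting pmd_trans_huge(*pmd) || pmd_devmap(*pmd).
 */
#include <assert.h>
#include <stdbool.h>

static void assert_pmd_is_huge(bool trans_huge, bool devmap)
{
	assert(trans_huge || devmap);	/* either kind of huge pmd is fine */
}

int main(void)
{
	assert_pmd_is_huge(true, false);	/* THP: passes */
	assert_pmd_is_huge(false, true);	/* devmap: now also passes */
	return 0;
}
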
@@ -2901,14 +2904,15 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 
 	mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
 	ptl = pmd_lock(mm, pmd);
-	if (unlikely(!pmd_trans_huge(*pmd)))
+	if (pmd_trans_huge(*pmd)) {
+		page = pmd_page(*pmd);
+		if (PageMlocked(page))
+			get_page(page);
+		else
+			page = NULL;
+	} else if (!pmd_devmap(*pmd))
 		goto out;
-	page = pmd_page(*pmd);
 	__split_huge_pmd_locked(vma, pmd, haddr, false);
-	if (PageMlocked(page))
-		get_page(page);
-	else
-		page = NULL;
 out:
 	spin_unlock(ptl);
 	mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE);
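
The restructuring in __split_huge_pmd() goes beyond predicate-widening: pmd_page() and the PageMlocked()/get_page() pinning are only meaningful for a genuine THP, so they move inside a pmd_trans_huge() branch and run before the split, while a devmap pmd now falls through to __split_huge_pmd_locked() without touching any struct page. A sketch with invented stand-in types (what the kernel later does with the pinned mlocked page is simplified here to a bare reference drop):

/*
 * Sketch of the reordered __split_huge_pmd() logic.  The types and
 * helpers are invented; only the branch structure mirrors the patch.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct page_model { bool mlocked; int refcount; };

struct pmd_model {
	bool trans_huge;
	bool devmap;
	struct page_model *page;	/* meaningful only for THP */
};

static void split_huge_pmd_model(struct pmd_model *pmd)
{
	struct page_model *page = NULL;

	if (pmd->trans_huge) {
		/* safe: only a THP has a usable pmd_page() */
		page = pmd->page;
		if (page->mlocked)
			page->refcount++;	/* pin across the split */
		else
			page = NULL;
	} else if (!pmd->devmap) {
		return;			/* nothing huge here: bail out */
	}

	printf("split pmd (thp=%d devmap=%d)\n",
	       pmd->trans_huge, pmd->devmap);

	if (page)
		page->refcount--;	/* release the pin afterwards */
}

int main(void)
{
	struct page_model page = { .mlocked = true, .refcount = 1 };
	struct pmd_model thp = { .trans_huge = true, .page = &page };
	struct pmd_model dax = { .devmap = true };

	split_huge_pmd_model(&thp);	/* pins the mlocked page */
	split_huge_pmd_model(&dax);	/* no struct page involved */
	return 0;
}
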
@@ -2938,7 +2942,7 @@ static void split_huge_pmd_address(struct vm_area_struct *vma,
 		return;
 
 	pmd = pmd_offset(pud, address);
-	if (!pmd_present(*pmd) || !pmd_trans_huge(*pmd))
+	if (!pmd_present(*pmd) || (!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)))
 		return;
 	/*
 	 * Caller holds the mmap_sem write mode, so a huge pmd cannot
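
Finally, split_huge_pmd_address() applies the same present-and-huge test at the end of its page-table walk before splitting at a given address; the upper-level presence checks are visible above as the early returns. A toy walk, with hand-rolled three-level tables and NULL checks replacing the kernel's pgd/pud/pmd accessors:

/*
 * Sketch of the split_huge_pmd_address() guard after the patch.  The
 * three-level structures below are stand-ins; what matters is the
 * final test: the pmd must be present and either THP or devmap before
 * a split is attempted.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct pmd_e { bool present, trans_huge, devmap; };
struct pud_e { struct pmd_e *pmd; };
struct pgd_e { struct pud_e *pud; };

static void split_huge_pmd_address_model(struct pgd_e *pgd)
{
	struct pud_e *pud;
	struct pmd_e *pmd;

	if (!pgd || !(pud = pgd->pud) || !(pmd = pud->pmd))
		return;			/* hole in the page tables */

	/* post-patch: devmap pmds are split too, not only THP */
	if (!pmd->present || (!pmd->trans_huge && !pmd->devmap))
		return;

	printf("split pmd at this address\n");
}

int main(void)
{
	struct pmd_e pmd = { .present = true, .devmap = true };
	struct pud_e pud = { .pmd = &pmd };
	struct pgd_e pgd = { .pud = &pud };

	split_huge_pmd_address_model(&pgd);	/* devmap: now splits */
	pmd.devmap = false;
	split_huge_pmd_address_model(&pgd);	/* neither: silently bails */
	return 0;
}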