@@ -986,15 +986,6 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		goto out_unlock;
 	}
 
-	if (unlikely(pmd_trans_splitting(pmd))) {
-		/* split huge page running from under us */
-		spin_unlock(src_ptl);
-		spin_unlock(dst_ptl);
-		pte_free(dst_mm, pgtable);
-
-		wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
-		goto out;
-	}
 	src_page = pmd_page(pmd);
 	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
 	get_page(src_page);
@@ -1470,7 +1461,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	pmd_t orig_pmd;
 	spinlock_t *ptl;
 
-	if (__pmd_trans_huge_lock(pmd, vma, &ptl) != 1)
+	if (!__pmd_trans_huge_lock(pmd, vma, &ptl))
 		return 0;
 	/*
 	 * For architectures like ppc64 we look at deposited pgtable
@@ -1504,13 +1495,12 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	return 1;
 }
 
-int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
+bool move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
 		  unsigned long old_addr,
 		  unsigned long new_addr, unsigned long old_end,
 		  pmd_t *old_pmd, pmd_t *new_pmd)
 {
 	spinlock_t *old_ptl, *new_ptl;
-	int ret = 0;
 	pmd_t pmd;
 
 	struct mm_struct *mm = vma->vm_mm;
@@ -1519,7 +1509,7 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
 	    (new_addr & ~HPAGE_PMD_MASK) ||
 	    old_end - old_addr < HPAGE_PMD_SIZE ||
 	    (new_vma->vm_flags & VM_NOHUGEPAGE))
-		goto out;
+		return false;
 
 	/*
 	 * The destination pmd shouldn't be established, free_pgtables()
@@ -1527,15 +1517,14 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
 	 */
 	if (WARN_ON(!pmd_none(*new_pmd))) {
 		VM_BUG_ON(pmd_trans_huge(*new_pmd));
-		goto out;
+		return false;
 	}
 
 	/*
 	 * We don't have to worry about the ordering of src and dst
 	 * ptlocks because exclusive mmap_sem prevents deadlock.
 	 */
-	ret = __pmd_trans_huge_lock(old_pmd, vma, &old_ptl);
-	if (ret == 1) {
+	if (__pmd_trans_huge_lock(old_pmd, vma, &old_ptl)) {
 		new_ptl = pmd_lockptr(mm, new_pmd);
 		if (new_ptl != old_ptl)
 			spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
@@ -1551,9 +1540,9 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
 		if (new_ptl != old_ptl)
 			spin_unlock(new_ptl);
 		spin_unlock(old_ptl);
+		return true;
 	}
-out:
-	return ret;
+	return false;
 }
 
 /*
@@ -1569,7 +1558,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	spinlock_t *ptl;
 	int ret = 0;
 
-	if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
+	if (__pmd_trans_huge_lock(pmd, vma, &ptl)) {
 		pmd_t entry;
 		bool preserve_write = prot_numa && pmd_write(*pmd);
 		ret = 1;
@@ -1600,29 +1589,19 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 }
 
 /*
- * Returns 1 if a given pmd maps a stable (not under splitting) thp.
- * Returns -1 if it maps a thp under splitting. Returns 0 otherwise.
+ * Returns true if a given pmd maps a thp, false otherwise.
  *
- * Note that if it returns 1, this routine returns without unlocking page
- * table locks. So callers must unlock them.
+ * Note that if it returns true, this routine returns without unlocking page
+ * table lock. So callers must unlock it.
  */
-int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
+bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
 		spinlock_t **ptl)
 {
 	*ptl = pmd_lock(vma->vm_mm, pmd);
-	if (likely(pmd_trans_huge(*pmd))) {
-		if (unlikely(pmd_trans_splitting(*pmd))) {
-			spin_unlock(*ptl);
-			wait_split_huge_page(vma->anon_vma, pmd);
-			return -1;
-		} else {
-			/* Thp mapped by 'pmd' is stable, so we can
-			 * handle it as it is. */
-			return 1;
-		}
-	}
+	if (likely(pmd_trans_huge(*pmd)))
+		return true;
 	spin_unlock(*ptl);
-	return 0;
+	return false;
 }
 
 /*
@@ -1636,7 +1615,6 @@ int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
 pmd_t *page_check_address_pmd(struct page *page,
 			      struct mm_struct *mm,
 			      unsigned long address,
-			      enum page_check_address_pmd_flag flag,
 			      spinlock_t **ptl)
 {
 	pgd_t *pgd;
@@ -1659,21 +1637,8 @@ pmd_t *page_check_address_pmd(struct page *page,
 		goto unlock;
 	if (pmd_page(*pmd) != page)
 		goto unlock;
-	/*
-	 * split_vma() may create temporary aliased mappings. There is
-	 * no risk as long as all huge pmd are found and have their
-	 * splitting bit set before __split_huge_page_refcount
-	 * runs. Finding the same huge pmd more than once during the
-	 * same rmap walk is not a problem.
-	 */
-	if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
-	    pmd_trans_splitting(*pmd))
-		goto unlock;
-	if (pmd_trans_huge(*pmd)) {
-		VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
-			  !pmd_trans_splitting(*pmd));
+	if (pmd_trans_huge(*pmd))
 		return pmd;
-	}
 unlock:
 	spin_unlock(*ptl);
 	return NULL;
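
Illustration (not part of the patch): a minimal caller sketch, assuming the post-patch contract of __pmd_trans_huge_lock(). example_huge_pmd_op() and its body are hypothetical; only the locking convention is taken from the hunks above.

#include <linux/mm.h>
#include <linux/huge_mm.h>

/*
 * Hypothetical caller of __pmd_trans_huge_lock(). Under the new
 * contract it returns true with the pmd lock held in *ptl when
 * 'pmd' maps a stable THP, and false with the lock already dropped
 * otherwise; the -1 "under splitting" result no longer exists, so
 * a plain boolean test replaces the old '== 1' / '!= 1' checks.
 */
static int example_huge_pmd_op(pmd_t *pmd, struct vm_area_struct *vma)
{
	spinlock_t *ptl;

	if (!__pmd_trans_huge_lock(pmd, vma, &ptl))
		return 0;	/* not a huge pmd; lock was released for us */

	/* ... inspect or modify the huge pmd under ptl here ... */

	spin_unlock(ptl);	/* true path: the caller must unlock */
	return 1;
}

With splitting PMDs gone, every caller collapses its former three-way result handling into this boolean pattern, which is what the conversions in zap_huge_pmd(), move_huge_pmd() and change_huge_pmd() above do.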