@@ -1162,7 +1162,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		if (vma->vm_flags & VM_LOCKED)
 			goto out_mlock;
 
-		if (TTU_ACTION(flags) == TTU_MUNLOCK)
+		if (flags & TTU_MUNLOCK)
 			goto out_unmap;
 	}
 	if (!(flags & TTU_IGNORE_ACCESS)) {
@@ -1230,7 +1230,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			 * pte. do_swap_page() will wait until the migration
 			 * pte is removed and then restart fault handling.
 			 */
-			BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
+			BUG_ON(!(flags & TTU_MIGRATION));
 			entry = make_migration_entry(page, pte_write(pteval));
 		}
 		swp_pte = swp_entry_to_pte(entry);
@@ -1239,7 +1239,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		set_pte_at(mm, address, pte, swp_pte);
 		BUG_ON(pte_file(*pte));
 	} else if (IS_ENABLED(CONFIG_MIGRATION) &&
-		   (TTU_ACTION(flags) == TTU_MIGRATION)) {
+		   (flags & TTU_MIGRATION)) {
 		/* Establish migration entry for a file page */
 		swp_entry_t entry;
 		entry = make_migration_entry(page, pte_write(pteval));
@@ -1252,7 +1252,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 
 out_unmap:
 	pte_unmap_unlock(pte, ptl);
-	if (ret != SWAP_FAIL && TTU_ACTION(flags) != TTU_MUNLOCK)
+	if (ret != SWAP_FAIL && !(flags & TTU_MUNLOCK))
 		mmu_notifier_invalidate_page(mm, address);
 out:
 	return ret;
@@ -1539,7 +1539,7 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
 	 * locking requirements of exec(), migration skips
 	 * temporary VMAs until after exec() completes.
 	 */
-	if (flags & TTU_MIGRATION && !PageKsm(page) && PageAnon(page))
+	if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page))
		rwc.invalid_vma = invalid_migration_vma;
 
 	ret = rmap_walk(page, &rwc);