@@ -1348,7 +1348,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,

 	if (flags & TTU_SPLIT_HUGE_PMD) {
 		split_huge_pmd_address(vma, address,
-				flags & TTU_MIGRATION, page);
+				flags & TTU_SPLIT_FREEZE, page);
 	}

 	/*
@@ -1445,7 +1445,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			 */
 			dec_mm_counter(mm, mm_counter(page));
 		} else if (IS_ENABLED(CONFIG_MIGRATION) &&
-				(flags & TTU_MIGRATION)) {
+				(flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))) {
 			swp_entry_t entry;
 			pte_t swp_pte;
 			/*
@@ -1575,7 +1575,8 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
 	 * locking requirements of exec(), migration skips
 	 * temporary VMAs until after exec() completes.
 	 */
-	if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page))
+	if ((flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))
+			&& !PageKsm(page) && PageAnon(page))
 		rwc.invalid_vma = invalid_migration_vma;

 	if (flags & TTU_RMAP_LOCKED)
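
For context, a minimal sketch of the caller side these hunks imply: the THP split path would request freezing via the dedicated TTU_SPLIT_FREEZE flag instead of overloading TTU_MIGRATION, with anonymous pages still adding TTU_MIGRATION for the migration-entry path. The freeze_page() shape and the flags beyond TTU_SPLIT_FREEZE/TTU_MIGRATION below are assumptions, not part of the diff above:

/* Assumed caller in mm/huge_memory.c; the exact flag set here is
 * illustrative. */
static void freeze_page(struct page *page)
{
	enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
		TTU_RMAP_LOCKED | TTU_SPLIT_FREEZE;
	bool unmap_success;

	VM_BUG_ON_PAGE(!PageHead(page), page);

	if (PageAnon(page))
		ttu_flags |= TTU_MIGRATION;

	/* With the patch above, try_to_unmap() treats TTU_SPLIT_FREEZE
	 * like TTU_MIGRATION in the patched paths: the PMD is split with
	 * freeze semantics and PTEs are replaced by migration entries
	 * rather than being unmapped outright. */
	unmap_success = try_to_unmap(page, ttu_flags);
	VM_BUG_ON_PAGE(!unmap_success, page);
}

With this, the "freeze" intent is carried explicitly by TTU_SPLIT_FREEZE, so TTU_MIGRATION no longer has to double as the freeze signal during THP split.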