@@ -1179,15 +1179,18 @@ out:
 /*
  * Subfunctions of try_to_unmap: try_to_unmap_one called
  * repeatedly from try_to_unmap_ksm, try_to_unmap_anon or try_to_unmap_file.
+ *
+ * @arg: enum ttu_flags will be passed to this argument
  */
 int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
-		     unsigned long address, enum ttu_flags flags)
+		     unsigned long address, void *arg)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *pte;
 	pte_t pteval;
 	spinlock_t *ptl;
 	int ret = SWAP_AGAIN;
+	enum ttu_flags flags = (enum ttu_flags)arg;
 
 	pte = page_check_address(page, mm, address, &ptl, 0);
 	if (!pte)
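
The signature change above is the standard C trick of threading a scalar through an opaque void * callback argument: the caller packs the flags into the pointer, and the callback unpacks them. A minimal standalone sketch of that round trip, with toy names rather than kernel code (the kernel casts straight between the enum and the pointer; this sketch goes through unsigned long to stay warning-free):

#include <stdio.h>

enum toy_flags { TOY_UNMAP = 1, TOY_MIGRATION = 2 };

/* Generic walker callback: arg is opaque to the walker. */
typedef int (*walk_cb)(int item, void *arg);

static int handle_one(int item, void *arg)
{
	/* Unpack, mirroring "flags = (enum ttu_flags)arg" above. */
	enum toy_flags flags = (enum toy_flags)(unsigned long)arg;

	printf("item %d, flags %#x\n", item, (unsigned)flags);
	return 0;
}

static void walk(walk_cb cb, void *arg)
{
	for (int i = 0; i < 3; i++)
		cb(i, arg);
}

int main(void)
{
	/* Pack, mirroring the "(void *)flags" call sites below. */
	walk(handle_one, (void *)(unsigned long)(TOY_UNMAP | TOY_MIGRATION));
	return 0;
}
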
@@ -1513,6 +1516,11 @@ bool is_vma_temporary_stack(struct vm_area_struct *vma)
 	return false;
 }
 
+static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
+{
+	return is_vma_temporary_stack(vma);
+}
+
 /**
  * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
  * rmap method
@@ -1558,7 +1566,7 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
 			continue;
 
 		address = vma_address(page, vma);
-		ret = try_to_unmap_one(page, vma, address, flags);
+		ret = try_to_unmap_one(page, vma, address, (void *)flags);
 		if (ret != SWAP_AGAIN || !page_mapped(page))
 			break;
 	}
@@ -1592,7 +1600,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 	mutex_lock(&mapping->i_mmap_mutex);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
-		ret = try_to_unmap_one(page, vma, address, flags);
+		ret = try_to_unmap_one(page, vma, address, (void *)flags);
 		if (ret != SWAP_AGAIN || !page_mapped(page))
 			goto out;
 	}
@@ -1614,6 +1622,11 @@ out:
 	return ret;
 }
 
+static int page_not_mapped(struct page *page)
+{
+	return !page_mapped(page);
+};
+
 /**
  * try_to_unmap - try to remove all page table mappings to a page
  * @page: the page to get unmapped
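
page_not_mapped() becomes the walk's termination test: rmap_walk() can stop as soon as the page has no mappings left, replacing the open-coded "!page_mapped(page)" early exits deleted from the anon and file loops above. A standalone sketch of this done-predicate pattern, again with toy names rather than the real walker:

#include <stdio.h>

/* Toy "page": mapcount drops as each mapping is unmapped. */
struct toy_page { int mapcount; };

static int toy_unmap_one(struct toy_page *page)
{
	page->mapcount--;
	return 0;
}

/* Mirrors page_not_mapped(): non-zero means the walk may stop. */
static int toy_not_mapped(struct toy_page *page)
{
	return page->mapcount == 0;
}

/* The walker polls done() after every callback, as rmap_walk does. */
static void toy_walk(struct toy_page *page, int nr_vmas,
		     int (*one)(struct toy_page *),
		     int (*done)(struct toy_page *))
{
	for (int i = 0; i < nr_vmas; i++) {
		one(page);
		if (done && done(page)) {
			printf("stopped after %d of %d callbacks\n",
			       i + 1, nr_vmas);
			return;
		}
	}
}

int main(void)
{
	struct toy_page page = { .mapcount = 3 };

	/* Five candidate VMAs, but the walk stops after three. */
	toy_walk(&page, 5, toy_unmap_one, toy_not_mapped);
	return 0;
}
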
@@ -1631,16 +1644,29 @@ out:
 int try_to_unmap(struct page *page, enum ttu_flags flags)
 {
 	int ret;
+	struct rmap_walk_control rwc = {
+		.rmap_one = try_to_unmap_one,
+		.arg = (void *)flags,
+		.done = page_not_mapped,
+		.file_nonlinear = try_to_unmap_nonlinear,
+		.anon_lock = page_lock_anon_vma_read,
+	};
 
-	BUG_ON(!PageLocked(page));
 	VM_BUG_ON(!PageHuge(page) && PageTransHuge(page));
 
-	if (unlikely(PageKsm(page)))
-		ret = try_to_unmap_ksm(page, flags);
-	else if (PageAnon(page))
-		ret = try_to_unmap_anon(page, flags);
-	else
-		ret = try_to_unmap_file(page, flags);
+	/*
+	 * During exec, a temporary VMA is setup and later moved.
+	 * The VMA is moved under the anon_vma lock but not the
+	 * page tables leading to a race where migration cannot
+	 * find the migration ptes. Rather than increasing the
+	 * locking requirements of exec(), migration skips
+	 * temporary VMAs until after exec() completes.
+	 */
+	if (flags & TTU_MIGRATION && !PageKsm(page) && PageAnon(page))
+		rwc.invalid_vma = invalid_migration_vma;
+
+	ret = rmap_walk(page, &rwc);
+
 	if (ret != SWAP_MLOCK && !page_mapped(page))
 		ret = SWAP_SUCCESS;
 	return ret;
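
For reference, the control structure filled in above is added to include/linux/rmap.h earlier in this series. The sketch below is reconstructed from the callbacks this patch actually wires up, so exact field order and comments may differ from the real header: rmap_one matches the new try_to_unmap_one() signature, done matches page_not_mapped(), anon_lock matches page_lock_anon_vma_read(), and invalid_vma matches invalid_migration_vma().

/* Rough sketch of the walk control (see include/linux/rmap.h). */
struct rmap_walk_control {
	void *arg;	/* handed through to rmap_one and invalid_vma */

	/* Called for each VMA that maps the page. */
	int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
			unsigned long addr, void *arg);

	/* Non-zero return lets the walk stop early. */
	int (*done)(struct page *page);

	/* Handles VM_NONLINEAR file mappings. */
	int (*file_nonlinear)(struct page *page,
			      struct address_space *mapping,
			      struct vm_area_struct *vma);

	/* How to take the anon_vma lock (read vs. trylock variants). */
	struct anon_vma *(*anon_lock)(struct page *page);

	/* VMAs for which this returns true are skipped. */
	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};
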
@@ -1683,7 +1709,6 @@ void __put_anon_vma(struct anon_vma *anon_vma)
 	anon_vma_free(anon_vma);
 }
 
-#ifdef CONFIG_MIGRATION
 static struct anon_vma *rmap_walk_anon_lock(struct page *page,
 					struct rmap_walk_control *rwc)
 {
@@ -1785,7 +1810,6 @@ int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
 	else
 		return rmap_walk_file(page, rwc);
 }
-#endif /* CONFIG_MIGRATION */
 
 #ifdef CONFIG_HUGETLB_PAGE
 /*