@@ -887,11 +887,21 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 		.address = address,
 		.flags = PVMW_SYNC,
 	};
+	unsigned long start = address, end;
 	int *cleaned = arg;
 
+	/*
+	 * We have to assume the worst case, i.e. a pmd mapping, for
+	 * invalidation. Note that the page cannot be freed from this function.
+	 */
+	end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
+	mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
+
 	while (page_vma_mapped_walk(&pvmw)) {
+		unsigned long cstart, cend;
 		int ret = 0;
-		address = pvmw.address;
+
+		cstart = address = pvmw.address;
 		if (pvmw.pte) {
 			pte_t entry;
 			pte_t *pte = pvmw.pte;
@@ -904,6 +914,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 			entry = pte_wrprotect(entry);
 			entry = pte_mkclean(entry);
 			set_pte_at(vma->vm_mm, address, pte, entry);
+			cend = cstart + PAGE_SIZE;
 			ret = 1;
 		} else {
 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
@@ -918,6 +929,8 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 			entry = pmd_wrprotect(entry);
 			entry = pmd_mkclean(entry);
 			set_pmd_at(vma->vm_mm, address, pmd, entry);
+			cstart &= PMD_MASK;
+			cend = cstart + PMD_SIZE;
 			ret = 1;
 #else
 			/* unexpected pmd-mapped page? */
@@ -926,11 +939,13 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 		}
 
 		if (ret) {
-			mmu_notifier_invalidate_page(vma->vm_mm, address);
+			mmu_notifier_invalidate_range(vma->vm_mm, cstart, cend);
 			(*cleaned)++;
 		}
 	}
 
+	mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
+
 	return true;
 }
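[Editor's note, not part of the patch: in page_mkclean_one() above, the range handed to mmu_notifier_invalidate_range() depends on whether a pte or a pmd was cleaned. A minimal sketch of that cstart/cend computation follows, assuming standard kernel headers; the helper name my_range_for_entry is hypothetical.]

	#include <linux/mm.h>	/* PAGE_SIZE, PMD_MASK, PMD_SIZE */

	/* Hypothetical helper mirroring the cstart/cend logic above. */
	static void my_range_for_entry(bool pmd_mapped, unsigned long address,
				       unsigned long *cstart, unsigned long *cend)
	{
		*cstart = address;
		if (pmd_mapped) {
			*cstart &= PMD_MASK;		/* align down to the pmd boundary */
			*cend = *cstart + PMD_SIZE;	/* a pmd maps PMD_SIZE bytes */
		} else {
			*cend = *cstart + PAGE_SIZE;	/* a pte maps one base page */
		}
	}

[The pmd case aligns down first because the walked address tracks the subpage, which need not sit at a pmd boundary.]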
@@ -1324,6 +1339,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	pte_t pteval;
 	struct page *subpage;
 	bool ret = true;
+	unsigned long start = address, end;
 	enum ttu_flags flags = (enum ttu_flags)arg;
 
 	/* munlock has nothing to gain from examining un-locked vmas */
@@ -1335,6 +1351,14 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 					    flags & TTU_MIGRATION, page);
 	}
 
+	/*
+	 * We have to assume the worst case, i.e. a pmd mapping, for
+	 * invalidation. Note that the page cannot be freed in this function,
+	 * as the caller of try_to_unmap() must hold a reference on the page.
+	 */
+	end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
+	mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
+
 	while (page_vma_mapped_walk(&pvmw)) {
 		/*
 		 * If the page is mlock()d, we cannot swap it out.
@@ -1445,6 +1469,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) {
 				WARN_ON_ONCE(1);
 				ret = false;
+				/* We have to invalidate as we cleared the pte */
 				page_vma_mapped_walk_done(&pvmw);
 				break;
 			}
@@ -1490,8 +1515,12 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 discard:
 		page_remove_rmap(subpage, PageHuge(page));
 		put_page(page);
-		mmu_notifier_invalidate_page(mm, address);
+		mmu_notifier_invalidate_range(mm, address,
+					      address + PAGE_SIZE);
 	}
 
+	mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
+
 	return ret;
 }
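[Editor's note, not part of the patch: both functions now share the same shape, i.e. one invalidate_range_start()/invalidate_range_end() pair bracketing the whole rmap walk, with per-entry invalidate_range() calls inside, in place of the removed per-page mmu_notifier_invalidate_page() callback. Below is a minimal sketch of that bracketing pattern, assuming the 4.13-era notifier signatures used by the patch; the helper name my_walk_and_invalidate is hypothetical.]

	#include <linux/mm.h>
	#include <linux/mmu_notifier.h>

	static void my_walk_and_invalidate(struct page *page,
					   struct vm_area_struct *vma,
					   unsigned long address)
	{
		/*
		 * Assume the worst case (a pmd mapping): cover the whole
		 * compound page, clamped to the end of the VMA.
		 */
		unsigned long start = address;
		unsigned long end = min(vma->vm_end,
					start + (PAGE_SIZE << compound_order(page)));

		mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);

		/*
		 * ... page_vma_mapped_walk() loop: modify ptes/pmds and call
		 * mmu_notifier_invalidate_range() for each sub-range touched ...
		 */

		mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
	}

[Computing end up front is what lets the start/end pair be emitted exactly once per VMA, regardless of how many entries the walk visits.]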