|
@@ -270,6 +270,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
|
|
|
spinlock_t *ptl;
|
|
|
pte_t *orig_pte, *pte, ptent;
|
|
|
struct page *page;
|
|
|
+ int nr_swap = 0;
|
|
|
|
|
|
split_huge_pmd(vma, pmd, addr);
|
|
|
if (pmd_trans_unstable(pmd))
|
|
@@ -280,8 +281,24 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
|
|
|
for (; addr != end; pte++, addr += PAGE_SIZE) {
|
|
|
ptent = *pte;
|
|
|
|
|
|
- if (!pte_present(ptent))
|
|
|
+ if (pte_none(ptent))
|
|
|
continue;
|
|
|
+ /*
|
|
|
+ * If the pte holds a swp_entry, just clear the page table entry to
|
|
|
+ * prevent a future swap-in, which is more expensive than
|
|
|
+ * (page allocation + zeroing).
|
|
|
+ */
|
|
|
+ if (!pte_present(ptent)) {
|
|
|
+ swp_entry_t entry;
|
|
|
+
|
|
|
+ entry = pte_to_swp_entry(ptent);
|
|
|
+ if (non_swap_entry(entry))
|
|
|
+ continue;
|
|
|
+ nr_swap--;
|
|
|
+ free_swap_and_cache(entry);
|
|
|
+ pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
|
|
|
+ continue;
|
|
|
+ }
|
|
|
|
|
|
page = vm_normal_page(vma, addr, ptent);
|
|
|
if (!page)
|
|
@@ -355,6 +372,12 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
|
|
|
}
|
|
|
}
|
|
|
out:
|
|
|
+ if (nr_swap) {
|
|
|
+ if (current->mm == mm)
|
|
|
+ sync_mm_rss(mm);
|
|
|
+
|
|
|
+ add_mm_counter(mm, MM_SWAPENTS, nr_swap);
|
|
|
+ }
|
|
|
arch_leave_lazy_mmu_mode();
|
|
|
pte_unmap_unlock(orig_pte, ptl);
|
|
|
cond_resched();
|