|
@@ -528,7 +528,11 @@ void free_pgd_range(struct mmu_gather *tlb,
|
|
|
end -= PMD_SIZE;
|
|
|
if (addr > end - 1)
|
|
|
return;
|
|
|
-
|
|
|
+ /*
|
|
|
+ * We add page table cache pages with PAGE_SIZE,
|
|
|
+ * (see pte_free_tlb()); flush the TLB if the batched page size changes
|
|
|
+ */
|
|
|
+ tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
|
|
|
pgd = pgd_offset(tlb->mm, addr);
|
|
|
do {
|
|
|
next = pgd_addr_end(addr, end);
|
|
@@ -1120,6 +1124,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
|
|
|
swp_entry_t entry;
|
|
|
struct page *pending_page = NULL;
|
|
|
|
|
|
+ tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
|
|
|
again:
|
|
|
init_rss_vec(rss);
|
|
|
start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
|