@@ -1082,6 +1082,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	spinlock_t *ptl;
 	pte_t *start_pte;
 	pte_t *pte;
+	swp_entry_t entry;
 
 again:
 	init_rss_vec(rss);
@@ -1107,28 +1108,12 @@ again:
 				if (details->check_mapping &&
 				    details->check_mapping != page->mapping)
 					continue;
-				/*
-				 * Each page->index must be checked when
-				 * invalidating or truncating nonlinear.
-				 */
-				if (details->nonlinear_vma &&
-				    (page->index < details->first_index ||
-				     page->index > details->last_index))
-					continue;
 			}
 			ptent = ptep_get_and_clear_full(mm, addr, pte,
 							tlb->fullmm);
 			tlb_remove_tlb_entry(tlb, pte, addr);
 			if (unlikely(!page))
 				continue;
-			if (unlikely(details) && details->nonlinear_vma
-			    && linear_page_index(details->nonlinear_vma,
-						addr) != page->index) {
-				pte_t ptfile = pgoff_to_pte(page->index);
-				if (pte_soft_dirty(ptent))
-					ptfile = pte_file_mksoft_dirty(ptfile);
-				set_pte_at(mm, addr, pte, ptfile);
-			}
 			if (PageAnon(page))
 				rss[MM_ANONPAGES]--;
 			else {
@@ -1151,33 +1136,25 @@ again:
 			}
 			continue;
 		}
-		/*
-		 * If details->check_mapping, we leave swap entries;
-		 * if details->nonlinear_vma, we leave file entries.
-		 */
+		/* If details->check_mapping, we leave swap entries. */
 		if (unlikely(details))
 			continue;
-		if (pte_file(ptent)) {
-			if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
-				print_bad_pte(vma, addr, ptent, NULL);
-		} else {
-			swp_entry_t entry = pte_to_swp_entry(ptent);
 
-			if (!non_swap_entry(entry))
-				rss[MM_SWAPENTS]--;
-			else if (is_migration_entry(entry)) {
-				struct page *page;
+		entry = pte_to_swp_entry(ptent);
+		if (!non_swap_entry(entry))
+			rss[MM_SWAPENTS]--;
+		else if (is_migration_entry(entry)) {
+			struct page *page;
 
-				page = migration_entry_to_page(entry);
+			page = migration_entry_to_page(entry);
 
-				if (PageAnon(page))
-					rss[MM_ANONPAGES]--;
-				else
-					rss[MM_FILEPAGES]--;
-			}
-			if (unlikely(!free_swap_and_cache(entry)))
-				print_bad_pte(vma, addr, ptent, NULL);
+			if (PageAnon(page))
+				rss[MM_ANONPAGES]--;
+			else
+				rss[MM_FILEPAGES]--;
 		}
+		if (unlikely(!free_swap_and_cache(entry)))
+			print_bad_pte(vma, addr, ptent, NULL);
 		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 
@@ -1277,7 +1254,7 @@ static void unmap_page_range(struct mmu_gather *tlb,
 	pgd_t *pgd;
 	unsigned long next;
 
-	if (details && !details->check_mapping && !details->nonlinear_vma)
+	if (details && !details->check_mapping)
 		details = NULL;
 
 	BUG_ON(addr >= end);
@@ -1371,7 +1348,7 @@ void unmap_vmas(struct mmu_gather *tlb,
  * @vma: vm_area_struct holding the applicable pages
  * @start: starting address of pages to zap
  * @size: number of bytes to zap
- * @details: details of nonlinear truncation or shared cache invalidation
+ * @details: details of shared cache invalidation
  *
  * Caller must protect the VMA list
  */
@@ -1397,7 +1374,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
  * @vma: vm_area_struct holding the applicable pages
  * @address: starting address of pages to zap
  * @size: number of bytes to zap
- * @details: details of nonlinear truncation or shared cache invalidation
+ * @details: details of shared cache invalidation
  *
  * The range must fit into one VMA.
  */
@@ -2331,25 +2308,11 @@ static inline void unmap_mapping_range_tree(struct rb_root *root,
 	}
 }
 
-static inline void unmap_mapping_range_list(struct list_head *head,
-					    struct zap_details *details)
-{
-	struct vm_area_struct *vma;
-
-	/*
-	 * In nonlinear VMAs there is no correspondence between virtual address
-	 * offset and file offset. So we must perform an exhaustive search
-	 * across *all* the pages in each nonlinear VMA, not just the pages
-	 * whose virtual address lies outside the file truncation point.
-	 */
-	list_for_each_entry(vma, head, shared.nonlinear) {
-		details->nonlinear_vma = vma;
-		unmap_mapping_range_vma(vma, vma->vm_start, vma->vm_end, details);
-	}
-}
-
 /**
- * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
+ * unmap_mapping_range - unmap the portion of all mmaps in the specified
+ * address_space corresponding to the specified page range in the underlying
+ * file.
+ *
  * @mapping: the address space containing mmaps to be unmapped.
  * @holebegin: byte in first page to unmap, relative to the start of
  * the underlying file. This will be rounded down to a PAGE_SIZE
@@ -2378,7 +2341,6 @@ void unmap_mapping_range(struct address_space *mapping,
 	}
 
 	details.check_mapping = even_cows? NULL: mapping;
-	details.nonlinear_vma = NULL;
 	details.first_index = hba;
 	details.last_index = hba + hlen - 1;
 	if (details.last_index < details.first_index)
@@ -2388,8 +2350,6 @@ void unmap_mapping_range(struct address_space *mapping,
 	i_mmap_lock_write(mapping);
 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
-	if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
-		unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
 	i_mmap_unlock_write(mapping);
 }
 EXPORT_SYMBOL(unmap_mapping_range);