@@ -468,23 +468,20 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 		return;
 
 	index = start;
-	for ( ; ; ) {
+	while (index < end) {
 		cond_resched();
 
 		pvec.nr = find_get_entries(mapping, index,
 				min(end - index, (pgoff_t)PAGEVEC_SIZE),
 				pvec.pages, indices);
 		if (!pvec.nr) {
-			if (index == start || unfalloc)
+			/* If all gone or hole-punch or unfalloc, we're done */
+			if (index == start || end != -1)
 				break;
+			/* But if truncating, restart to make sure all gone */
 			index = start;
 			continue;
 		}
-		if ((index == start || unfalloc) && indices[0] >= end) {
-			pagevec_remove_exceptionals(&pvec);
-			pagevec_release(&pvec);
-			break;
-		}
 		mem_cgroup_uncharge_start();
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
@@ -496,8 +493,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 			if (radix_tree_exceptional_entry(page)) {
 				if (unfalloc)
 					continue;
-				nr_swaps_freed += !shmem_free_swap(mapping,
-							index, page);
+				if (shmem_free_swap(mapping, index, page)) {
+					/* Swap was replaced by page: retry */
+					index--;
+					break;
+				}
+				nr_swaps_freed++;
 				continue;
 			}
 
@@ -506,6 +507,11 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 			if (page->mapping == mapping) {
 				VM_BUG_ON_PAGE(PageWriteback(page), page);
 				truncate_inode_page(mapping, page);
+			} else {
+				/* Page was replaced by swap: retry */
+				unlock_page(page);
+				index--;
+				break;
 			}
 		}
 		unlock_page(page);
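
For illustration, here is a minimal userspace sketch of the "step back and retry" pattern the second and third hunks introduce. This is not kernel code: free_swap(), slot[], and the simulated race are all hypothetical stand-ins. The point is the control flow: when an entry is found to have changed under us, the inner batch loop does index--; break;, so the index++ at the bottom of the outer loop lands back on the same slot and it is re-read on the next pass.

#include <stdbool.h>
#include <stdio.h>

#define NSLOTS 8

enum entry_type { EMPTY, PAGE, SWAP };

static enum entry_type slot[NSLOTS] = {
	PAGE, SWAP, PAGE, SWAP, PAGE, SWAP, PAGE, PAGE
};

/*
 * Stand-in for shmem_free_swap(): fails once at slot 3 to simulate a
 * racing fault replacing the swap entry with a page just before we
 * manage to free it.
 */
static bool free_swap(unsigned int index)
{
	static bool raced;

	if (index == 3 && !raced) {
		raced = true;
		slot[index] = PAGE;	/* concurrent fault won the race */
		return false;
	}
	slot[index] = EMPTY;
	return true;
}

int main(void)
{
	unsigned int index = 0, end = NSLOTS;

	while (index < end) {
		unsigned int i, n = 0, batch[NSLOTS];

		/* Gather occupied slots from index on, like find_get_entries(). */
		for (i = index; i < end; i++)
			if (slot[i] != EMPTY)
				batch[n++] = i;
		if (!n)
			break;			/* all gone: done */

		for (i = 0; i < n; i++) {
			index = batch[i];
			if (slot[index] == SWAP) {
				if (!free_swap(index)) {
					/* Swap was replaced by page: retry */
					printf("slot %u changed under us, retrying\n", index);
					index--;
					break;
				}
				continue;
			}
			slot[index] = EMPTY;	/* "truncate" the page */
		}
		index++;	/* cancelled out by index-- on retry */
	}
	return 0;
}

Note how the index-- exactly cancels the index++ at the bottom of the outer loop, which is the same trick the patch relies on in shmem_undo_range(); with an unsigned index, as with the kernel's pgoff_t, this also behaves correctly at slot 0, where the decrement wraps and the increment wraps back.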