@@ -25,44 +25,85 @@
 #include <linux/rmap.h>
 #include "internal.h"
 
-static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
-			       void *entry)
+/*
+ * Regular page slots are stabilized by the page lock even without the tree
+ * itself locked. These unlocked entries need verification under the tree
+ * lock.
+ */
+static inline void __clear_shadow_entry(struct address_space *mapping,
+				pgoff_t index, void *entry)
 {
 	struct radix_tree_node *node;
 	void **slot;
 
-	spin_lock_irq(&mapping->tree_lock);
-	/*
-	 * Regular page slots are stabilized by the page lock even
-	 * without the tree itself locked. These unlocked entries
-	 * need verification under the tree lock.
-	 */
 	if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot))
-		goto unlock;
+		return;
 	if (*slot != entry)
-		goto unlock;
+		return;
 	__radix_tree_replace(&mapping->page_tree, node, slot, NULL,
 			     workingset_update_node);
 	mapping->nrexceptional--;
-unlock:
+}
+
+static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
+			       void *entry)
+{
+	spin_lock_irq(&mapping->tree_lock);
+	__clear_shadow_entry(mapping, index, entry);
 	spin_unlock_irq(&mapping->tree_lock);
 }
 
 /*
- * Unconditionally remove exceptional entry. Usually called from truncate path.
+ * Unconditionally remove exceptional entries. Usually called from truncate
+ * path. Note that the pagevec may be altered by this function by removing
+ * exceptional entries similar to what pagevec_remove_exceptionals does.
  */
-static void truncate_exceptional_entry(struct address_space *mapping,
-				       pgoff_t index, void *entry)
+static void truncate_exceptional_pvec_entries(struct address_space *mapping,
+				struct pagevec *pvec, pgoff_t *indices,
+				pgoff_t end)
 {
+	int i, j;
+	bool dax, lock;
+
 	/* Handled by shmem itself */
 	if (shmem_mapping(mapping))
 		return;
 
-	if (dax_mapping(mapping)) {
-		dax_delete_mapping_entry(mapping, index);
+	for (j = 0; j < pagevec_count(pvec); j++)
+		if (radix_tree_exceptional_entry(pvec->pages[j]))
+			break;
+
+	if (j == pagevec_count(pvec))
 		return;
+
+	dax = dax_mapping(mapping);
+	lock = !dax && indices[j] < end;
+	if (lock)
+		spin_lock_irq(&mapping->tree_lock);
+
+	for (i = j; i < pagevec_count(pvec); i++) {
+		struct page *page = pvec->pages[i];
+		pgoff_t index = indices[i];
+
+		if (!radix_tree_exceptional_entry(page)) {
+			pvec->pages[j++] = page;
+			continue;
+		}
+
+		if (index >= end)
+			continue;
+
+		if (unlikely(dax)) {
+			dax_delete_mapping_entry(mapping, index);
+			continue;
+		}
+
+		__clear_shadow_entry(mapping, index, page);
 	}
-	clear_shadow_entry(mapping, index, entry);
+
+	if (lock)
+		spin_unlock_irq(&mapping->tree_lock);
+	pvec->nr = j;
 }
 
 /*
@@ -310,11 +351,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
 			if (index >= end)
 				break;
 
-			if (radix_tree_exceptional_entry(page)) {
-				truncate_exceptional_entry(mapping, index,
-							   page);
+			if (radix_tree_exceptional_entry(page))
 				continue;
-			}
 
 			if (!trylock_page(page))
 				continue;
@@ -334,12 +372,11 @@ void truncate_inode_pages_range(struct address_space *mapping,
 		delete_from_page_cache_batch(mapping, &locked_pvec);
 		for (i = 0; i < pagevec_count(&locked_pvec); i++)
 			unlock_page(locked_pvec.pages[i]);
-		pagevec_remove_exceptionals(&pvec);
+		truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
 		pagevec_release(&pvec);
 		cond_resched();
 		index++;
 	}
-
 	if (partial_start) {
 		struct page *page = find_lock_page(mapping, start - 1);
 		if (page) {
@@ -397,6 +434,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 			pagevec_release(&pvec);
 			break;
 		}
+
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
 
@@ -408,11 +446,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
 				break;
 			}
 
-			if (radix_tree_exceptional_entry(page)) {
-				truncate_exceptional_entry(mapping, index,
-							   page);
+			if (radix_tree_exceptional_entry(page))
 				continue;
-			}
 
 			lock_page(page);
 			WARN_ON(page_to_index(page) != index);
@@ -420,7 +455,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 			truncate_inode_page(mapping, page);
 			unlock_page(page);
 		}
-		pagevec_remove_exceptionals(&pvec);
+		truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
 		pagevec_release(&pvec);
 		index++;
 	}
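
Note (not part of the patch): the stand-alone C program below is a minimal analogue of the batching pattern the new truncate_exceptional_pvec_entries() relies on, assuming a pthread mutex in place of mapping->tree_lock and a low tag bit in place of radix_tree_exceptional_entry(). It scans for the first tagged entry, takes the lock once for the whole batch, and compacts the untagged entries in place. All names in it (prune, BATCH, TAG) are hypothetical.

/* Hypothetical demo of the batched-pruning pattern; not kernel code. */
#include <stdio.h>
#include <stdint.h>
#include <pthread.h>

#define BATCH	8
#define TAG	1UL	/* low bit marks an "exceptional" entry */

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

static int is_exceptional(void *p)
{
	return (uintptr_t)p & TAG;
}

/* Drop tagged entries, compacting plain ones in place, like the helper. */
static unsigned prune(void *pages[], unsigned nr)
{
	unsigned i, j;

	for (j = 0; j < nr; j++)	/* find first tagged entry */
		if (is_exceptional(pages[j]))
			break;
	if (j == nr)
		return nr;		/* nothing tagged: lock never taken */

	pthread_mutex_lock(&tree_lock);	/* one lock round-trip per batch */
	for (i = j; i < nr; i++) {
		if (!is_exceptional(pages[i])) {
			pages[j++] = pages[i];	/* keep plain entry, shift left */
			continue;
		}
		/* here the kernel would call __clear_shadow_entry() */
	}
	pthread_mutex_unlock(&tree_lock);
	return j;			/* new count, like pvec->nr = j */
}

int main(void)
{
	long backing[BATCH];
	void *pages[BATCH];
	unsigned i, nr = BATCH;

	for (i = 0; i < nr; i++)	/* tag every other entry */
		pages[i] = (void *)((uintptr_t)&backing[i] | (i & TAG));
	nr = prune(pages, nr);
	printf("%u plain entries left\n", nr);	/* prints "4 plain entries left" */
	return 0;
}

The initial j-scan is the point of the pattern: a pagevec containing no exceptional entries never touches the lock at all, which is the common case in the truncate path.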