@@ -35,7 +35,8 @@ static void clear_exceptional_entry(struct address_space *mapping,
 	 * without the tree itself locked. These unlocked entries
 	 * need verification under the tree lock.
 	 */
-	radix_tree_delete_item(&mapping->page_tree, index, entry);
+	if (radix_tree_delete_item(&mapping->page_tree, index, entry) == entry)
+		mapping->nrshadows--;
 	spin_unlock_irq(&mapping->tree_lock);
 }
 
@@ -229,7 +230,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	int		i;
 
 	cleancache_invalidate_inode(mapping);
-	if (mapping->nrpages == 0)
+	if (mapping->nrpages == 0 && mapping->nrshadows == 0)
 		return;
 
 	/* Offsets within partial pages */
@@ -391,6 +392,53 @@ void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
 }
 EXPORT_SYMBOL(truncate_inode_pages);
 
+/**
+ * truncate_inode_pages_final - truncate *all* pages before inode dies
+ * @mapping: mapping to truncate
+ *
+ * Called under (and serialized by) inode->i_mutex.
+ *
+ * Filesystems have to use this in the .evict_inode path to inform the
+ * VM that this is the final truncate and the inode is going away.
+ */
+void truncate_inode_pages_final(struct address_space *mapping)
+{
+	unsigned long nrshadows;
+	unsigned long nrpages;
+
+	/*
+	 * Page reclaim can not participate in regular inode lifetime
+	 * management (can't call iput()) and thus can race with the
+	 * inode teardown. Tell it when the address space is exiting,
+	 * so that it does not install eviction information after the
+	 * final truncate has begun.
+	 */
+	mapping_set_exiting(mapping);
+
+	/*
+	 * When reclaim installs eviction entries, it increases
+	 * nrshadows first, then decreases nrpages. Make sure we see
+	 * this in the right order or we might miss an entry.
+	 */
+	nrpages = mapping->nrpages;
+	smp_rmb();
+	nrshadows = mapping->nrshadows;
+
+	if (nrpages || nrshadows) {
+		/*
+		 * As truncation uses a lockless tree lookup, cycle
+		 * the tree lock to make sure any ongoing tree
+		 * modification that does not see AS_EXITING is
+		 * completed before starting the final truncate.
+		 */
+		spin_lock_irq(&mapping->tree_lock);
+		spin_unlock_irq(&mapping->tree_lock);
+
+		truncate_inode_pages(mapping, 0);
+	}
+}
+EXPORT_SYMBOL(truncate_inode_pages_final);
+
 /**
  * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
  * @mapping: the address_space which holds the pages to invalidate
@@ -484,7 +532,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
 		goto failed;
 
 	BUG_ON(page_has_private(page));
-	__delete_from_page_cache(page);
+	__delete_from_page_cache(page, NULL);
 	spin_unlock_irq(&mapping->tree_lock);
 	mem_cgroup_uncharge_cache_page(page);
 
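/*
 * Illustrative sketch, not part of the patch above: the kerneldoc for
 * truncate_inode_pages_final() requires filesystems to call it from
 * their .evict_inode path. A minimal caller could look like the
 * following; "foofs" and its super_operations are hypothetical, and
 * only the truncate_inode_pages_final() call itself comes from the
 * interface added above (it typically replaces an earlier
 * truncate_inode_pages(&inode->i_data, 0) call).
 */
#include <linux/fs.h>
#include <linux/mm.h>

static void foofs_evict_inode(struct inode *inode)
{
	/*
	 * Final truncate: drops remaining pages and shadow entries,
	 * and marks the mapping AS_EXITING for page reclaim.
	 */
	truncate_inode_pages_final(&inode->i_data);

	/* ... hypothetical on-disk cleanup for unlinked inodes ... */

	clear_inode(inode);
}

static const struct super_operations foofs_sops = {
	.evict_inode	= foofs_evict_inode,
};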
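/*
 * Illustrative sketch, not part of the patch above: the smp_rmb() in
 * truncate_inode_pages_final() only closes the race if the eviction
 * side updates the counters in the opposite order, as its comment
 * describes (nrshadows goes up before nrpages goes down). The function
 * below is a made-up stand-in for that eviction-side update; the
 * write-side barrier shown here is assumed, not taken from this hunk.
 */
#include <linux/fs.h>

static void example_install_shadow(struct address_space *mapping)
{
	/* Account for the shadow entry before... */
	mapping->nrshadows++;
	/*
	 * ...this barrier, which pairs with the smp_rmb() in
	 * truncate_inode_pages_final()...
	 */
	smp_wmb();
	/*
	 * ...so a reader that observes this nrpages decrement is
	 * guaranteed to also observe the nrshadows increment above,
	 * and therefore cannot miss the shadow entry.
	 */
	mapping->nrpages--;
}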