|
@@ -107,12 +107,33 @@
 * ->tasklist_lock (memory_failure, collect_procs_ao)
 */
 
+/*
+ * page_cache_tree_delete - remove @page from @mapping's radix tree,
+ * optionally leaving a shadow entry in its place.
+ *
+ * @mapping: address_space the page is being removed from
+ * @page:    the page cache page being deleted
+ * @shadow:  opaque entry to store in the page's old slot, or NULL to
+ *           delete the slot outright
+ *
+ * NOTE(review): presumably runs under mapping->tree_lock, like its caller
+ * __delete_from_page_cache - confirm at the call sites.
+ */
+static void page_cache_tree_delete(struct address_space *mapping,
+				   struct page *page, void *shadow)
+{
+	if (shadow) {
+		void **slot;
+
+		/* Replace the page with the shadow entry in-place. */
+		slot = radix_tree_lookup_slot(&mapping->page_tree, page->index);
+		radix_tree_replace_slot(slot, shadow);
+		mapping->nrshadows++;
+		/*
+		 * Make sure the nrshadows update is committed before
+		 * the nrpages update so that final truncate racing
+		 * with reclaim does not see both counters 0 at the
+		 * same time and miss a shadow entry.
+		 */
+		smp_wmb();
+	} else
+		radix_tree_delete(&mapping->page_tree, page->index);
+	mapping->nrpages--;
+}
+
|
|
 /*
  * Delete a page from the page cache and free it. Caller has to make
  * sure the page is locked and that nobody else uses it - or that usage
  * is safe. The caller must hold the mapping's tree_lock.
  */
-void __delete_from_page_cache(struct page *page)
+void __delete_from_page_cache(struct page *page, void *shadow)
 {
 	struct address_space *mapping = page->mapping;
 
@@ -127,10 +148,11 @@ void __delete_from_page_cache(struct page *page)
 	else
 		cleancache_invalidate_page(mapping, page);
 
-	radix_tree_delete(&mapping->page_tree, page->index);
+	page_cache_tree_delete(mapping, page, shadow);
+
 	page->mapping = NULL;
 	/* Leave page->index set: truncation lookup relies upon it */
-	mapping->nrpages--;
+
 	__dec_zone_page_state(page, NR_FILE_PAGES);
 	if (PageSwapBacked(page))
 		__dec_zone_page_state(page, NR_SHMEM);
@@ -166,7 +188,7 @@ void delete_from_page_cache(struct page *page)
 
 	freepage = mapping->a_ops->freepage;
 	spin_lock_irq(&mapping->tree_lock);
-	__delete_from_page_cache(page);
+	__delete_from_page_cache(page, NULL);
 	spin_unlock_irq(&mapping->tree_lock);
 	mem_cgroup_uncharge_cache_page(page);
 
@@ -426,7 +448,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 	new->index = offset;
 
 	spin_lock_irq(&mapping->tree_lock);
-	__delete_from_page_cache(old);
+	__delete_from_page_cache(old, NULL);
 	error = radix_tree_insert(&mapping->page_tree, offset, new);
 	BUG_ON(error);
 	mapping->nrpages++;
@@ -460,6 +482,7 @@ static int page_cache_tree_insert(struct address_space *mapping,
 		if (!radix_tree_exceptional_entry(p))
 			return -EEXIST;
 		radix_tree_replace_slot(slot, page);
+		mapping->nrshadows--;
 		mapping->nrpages++;
 		return 0;
 	}