@@ -110,6 +110,62 @@
  *    ->tasklist_lock            (memory_failure, collect_procs_ao)
  */
 
+static int page_cache_tree_insert(struct address_space *mapping,
+				  struct page *page, void **shadowp)
+{
+	struct radix_tree_node *node;
+	void **slot;
+	int error;
+
+	error = __radix_tree_create(&mapping->page_tree, page->index, 0,
+				    &node, &slot);
+	if (error)
+		return error;
+	if (*slot) {
+		void *p;
+
+		p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
+		if (!radix_tree_exceptional_entry(p))
+			return -EEXIST;
+
+		mapping->nrexceptional--;
+		if (!dax_mapping(mapping)) {
+			if (shadowp)
+				*shadowp = p;
+			if (node)
+				workingset_node_shadows_dec(node);
+		} else {
+			/* DAX can replace empty locked entry with a hole */
+			WARN_ON_ONCE(p !=
+				(void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
+					 RADIX_DAX_ENTRY_LOCK));
+			/* DAX accounts exceptional entries as normal pages */
+			if (node)
+				workingset_node_pages_dec(node);
+			/* Wakeup waiters for exceptional entry lock */
+			dax_wake_mapping_entry_waiter(mapping, page->index,
+						      false);
+		}
+	}
+	radix_tree_replace_slot(slot, page);
+	mapping->nrpages++;
+	if (node) {
+		workingset_node_pages_inc(node);
+		/*
+		 * Don't track node that contains actual pages.
+		 *
+		 * Avoid acquiring the list_lru lock if already
+		 * untracked. The list_empty() test is safe as
+		 * node->private_list is protected by
+		 * mapping->tree_lock.
+		 */
+		if (!list_empty(&node->private_list))
+			list_lru_del(&workingset_shadow_nodes,
+				     &node->private_list);
+	}
+	return 0;
+}
+
 static void page_cache_tree_delete(struct address_space *mapping,
 				   struct page *page, void *shadow)
 {
@@ -561,7 +617,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 
 		spin_lock_irqsave(&mapping->tree_lock, flags);
 		__delete_from_page_cache(old, NULL);
-		error = radix_tree_insert(&mapping->page_tree, offset, new);
+		error = page_cache_tree_insert(mapping, new, NULL);
 		BUG_ON(error);
 		mapping->nrpages++;
 
@@ -584,62 +640,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL_GPL(replace_page_cache_page);
 
-static int page_cache_tree_insert(struct address_space *mapping,
-				  struct page *page, void **shadowp)
-{
-	struct radix_tree_node *node;
-	void **slot;
-	int error;
-
-	error = __radix_tree_create(&mapping->page_tree, page->index, 0,
-				    &node, &slot);
-	if (error)
-		return error;
-	if (*slot) {
-		void *p;
-
-		p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
-		if (!radix_tree_exceptional_entry(p))
-			return -EEXIST;
-
-		mapping->nrexceptional--;
-		if (!dax_mapping(mapping)) {
-			if (shadowp)
-				*shadowp = p;
-			if (node)
-				workingset_node_shadows_dec(node);
-		} else {
-			/* DAX can replace empty locked entry with a hole */
-			WARN_ON_ONCE(p !=
-				(void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
-					 RADIX_DAX_ENTRY_LOCK));
-			/* DAX accounts exceptional entries as normal pages */
-			if (node)
-				workingset_node_pages_dec(node);
-			/* Wakeup waiters for exceptional entry lock */
-			dax_wake_mapping_entry_waiter(mapping, page->index,
-						      false);
-		}
-	}
-	radix_tree_replace_slot(slot, page);
-	mapping->nrpages++;
-	if (node) {
-		workingset_node_pages_inc(node);
-		/*
-		 * Don't track node that contains actual pages.
-		 *
-		 * Avoid acquiring the list_lru lock if already
-		 * untracked. The list_empty() test is safe as
-		 * node->private_list is protected by
-		 * mapping->tree_lock.
-		 */
-		if (!list_empty(&node->private_list))
-			list_lru_del(&workingset_shadow_nodes,
-				     &node->private_list);
-	}
-	return 0;
-}
-
 static int __add_to_page_cache_locked(struct page *page,
 				      struct address_space *mapping,
 				      pgoff_t offset, gfp_t gfp_mask,