|
@@ -981,26 +981,6 @@ out:
|
|
|
}
|
|
|
EXPORT_SYMBOL(find_get_entry);
|
|
|
|
|
|
-/**
|
|
|
- * find_get_page - find and get a page reference
|
|
|
- * @mapping: the address_space to search
|
|
|
- * @offset: the page index
|
|
|
- *
|
|
|
- * Looks up the page cache slot at @mapping & @offset. If there is a
|
|
|
- * page cache page, it is returned with an increased refcount.
|
|
|
- *
|
|
|
- * Otherwise, %NULL is returned.
|
|
|
- */
|
|
|
-struct page *find_get_page(struct address_space *mapping, pgoff_t offset)
|
|
|
-{
|
|
|
- struct page *page = find_get_entry(mapping, offset);
|
|
|
-
|
|
|
- if (radix_tree_exceptional_entry(page))
|
|
|
- page = NULL;
|
|
|
- return page;
|
|
|
-}
|
|
|
-EXPORT_SYMBOL(find_get_page);
|
|
|
-
|
|
|
/**
|
|
|
* find_lock_entry - locate, pin and lock a page cache entry
|
|
|
* @mapping: the address_space to search
|
|
@@ -1038,66 +1018,84 @@ repeat:
|
|
|
EXPORT_SYMBOL(find_lock_entry);
|
|
|
|
|
|
/**
|
|
|
- * find_lock_page - locate, pin and lock a pagecache page
|
|
|
+ * pagecache_get_page - find and get a page reference
|
|
|
* @mapping: the address_space to search
|
|
|
* @offset: the page index
|
|
|
+ * @fgp_flags: FGP flags
|
|
|
+ * @gfp_mask: gfp mask to use if a page is to be allocated
|
|
|
*
|
|
|
- * Looks up the page cache slot at @mapping & @offset. If there is a
|
|
|
- * page cache page, it is returned locked and with an increased
|
|
|
- * refcount.
|
|
|
- *
|
|
|
- * Otherwise, %NULL is returned.
|
|
|
- *
|
|
|
- * find_lock_page() may sleep.
|
|
|
- */
|
|
|
-struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
|
|
|
-{
|
|
|
- struct page *page = find_lock_entry(mapping, offset);
|
|
|
-
|
|
|
- if (radix_tree_exceptional_entry(page))
|
|
|
- page = NULL;
|
|
|
- return page;
|
|
|
-}
|
|
|
-EXPORT_SYMBOL(find_lock_page);
|
|
|
-
|
|
|
-/**
|
|
|
- * find_or_create_page - locate or add a pagecache page
|
|
|
- * @mapping: the page's address_space
|
|
|
- * @index: the page's index into the mapping
|
|
|
- * @gfp_mask: page allocation mode
|
|
|
+ * Looks up the page cache slot at @mapping & @offset.
|
|
|
*
|
|
|
- * Looks up the page cache slot at @mapping & @offset. If there is a
|
|
|
- * page cache page, it is returned locked and with an increased
|
|
|
- * refcount.
|
|
|
+ * FGP flags modify how the page is returned
|
|
|
*
|
|
|
- * If the page is not present, a new page is allocated using @gfp_mask
|
|
|
- * and added to the page cache and the VM's LRU list. The page is
|
|
|
- * returned locked and with an increased refcount.
|
|
|
+ * FGP_ACCESSED: the page will be marked accessed
|
|
|
+ * FGP_LOCK: Page is returned locked
|
|
|
+ * FGP_CREAT: If page is not present then a new page is allocated using
|
|
|
+ * @gfp_mask and added to the page cache and the VM's LRU
|
|
|
+ * list. The page is returned locked and with an increased
|
|
|
+ * refcount. Otherwise, %NULL is returned.
|
|
|
*
|
|
|
- * On memory exhaustion, %NULL is returned.
|
|
|
+ * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
|
|
|
+ * if the GFP flags specified for FGP_CREAT are atomic.
|
|
|
*
|
|
|
- * find_or_create_page() may sleep, even if @gfp_flags specifies an
|
|
|
- * atomic allocation!
|
|
|
+ * If there is a page cache page, it is returned with an increased refcount.
|
|
|
*/
|
|
|
-struct page *find_or_create_page(struct address_space *mapping,
|
|
|
- pgoff_t index, gfp_t gfp_mask)
|
|
|
+struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
|
|
|
+ int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask)
|
|
|
{
|
|
|
struct page *page;
|
|
|
- int err;
|
|
|
+
|
|
|
repeat:
|
|
|
- page = find_lock_page(mapping, index);
|
|
|
- if (!page) {
|
|
|
- page = __page_cache_alloc(gfp_mask);
|
|
|
+ page = find_get_entry(mapping, offset);
|
|
|
+ if (radix_tree_exceptional_entry(page))
|
|
|
+ page = NULL;
|
|
|
+ if (!page)
|
|
|
+ goto no_page;
|
|
|
+
|
|
|
+ if (fgp_flags & FGP_LOCK) {
|
|
|
+ if (fgp_flags & FGP_NOWAIT) {
|
|
|
+ if (!trylock_page(page)) {
|
|
|
+ page_cache_release(page);
|
|
|
+ return NULL;
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ lock_page(page);
|
|
|
+ }
|
|
|
+
|
|
|
+ /* Has the page been truncated? */
|
|
|
+ if (unlikely(page->mapping != mapping)) {
|
|
|
+ unlock_page(page);
|
|
|
+ page_cache_release(page);
|
|
|
+ goto repeat;
|
|
|
+ }
|
|
|
+ VM_BUG_ON_PAGE(page->index != offset, page);
|
|
|
+ }
|
|
|
+
|
|
|
+ if (page && (fgp_flags & FGP_ACCESSED))
|
|
|
+ mark_page_accessed(page);
|
|
|
+
|
|
|
+no_page:
|
|
|
+ if (!page && (fgp_flags & FGP_CREAT)) {
|
|
|
+ int err;
|
|
|
+ if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
|
|
|
+ cache_gfp_mask |= __GFP_WRITE;
|
|
|
+ if (fgp_flags & FGP_NOFS) {
|
|
|
+ cache_gfp_mask &= ~__GFP_FS;
|
|
|
+ radix_gfp_mask &= ~__GFP_FS;
|
|
|
+ }
|
|
|
+
|
|
|
+ page = __page_cache_alloc(cache_gfp_mask);
|
|
|
if (!page)
|
|
|
return NULL;
|
|
|
- /*
|
|
|
- * We want a regular kernel memory (not highmem or DMA etc)
|
|
|
- * allocation for the radix tree nodes, but we need to honour
|
|
|
- * the context-specific requirements the caller has asked for.
|
|
|
- * GFP_RECLAIM_MASK collects those requirements.
|
|
|
- */
|
|
|
- err = add_to_page_cache_lru(page, mapping, index,
|
|
|
- (gfp_mask & GFP_RECLAIM_MASK));
|
|
|
+
|
|
|
+ if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK)))
|
|
|
+ fgp_flags |= FGP_LOCK;
|
|
|
+
|
|
|
+	/* Init accessed so avoid atomic mark_page_accessed later */
|
|
|
+ if (fgp_flags & FGP_ACCESSED)
|
|
|
+ init_page_accessed(page);
|
|
|
+
|
|
|
+ err = add_to_page_cache_lru(page, mapping, offset, radix_gfp_mask);
|
|
|
if (unlikely(err)) {
|
|
|
page_cache_release(page);
|
|
|
page = NULL;
|
|
@@ -1105,9 +1103,10 @@ repeat:
|
|
|
goto repeat;
|
|
|
}
|
|
|
}
|
|
|
+
|
|
|
return page;
|
|
|
}
|
|
|
-EXPORT_SYMBOL(find_or_create_page);
|
|
|
+EXPORT_SYMBOL(pagecache_get_page);
|
|
|
|
|
|
/**
|
|
|
* find_get_entries - gang pagecache lookup
|
|
@@ -1404,39 +1403,6 @@ repeat:
|
|
|
}
|
|
|
EXPORT_SYMBOL(find_get_pages_tag);
|
|
|
|
|
|
-/**
|
|
|
- * grab_cache_page_nowait - returns locked page at given index in given cache
|
|
|
- * @mapping: target address_space
|
|
|
- * @index: the page index
|
|
|
- *
|
|
|
- * Same as grab_cache_page(), but do not wait if the page is unavailable.
|
|
|
- * This is intended for speculative data generators, where the data can
|
|
|
- * be regenerated if the page couldn't be grabbed. This routine should
|
|
|
- * be safe to call while holding the lock for another page.
|
|
|
- *
|
|
|
- * Clear __GFP_FS when allocating the page to avoid recursion into the fs
|
|
|
- * and deadlock against the caller's locked page.
|
|
|
- */
|
|
|
-struct page *
|
|
|
-grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
|
|
|
-{
|
|
|
- struct page *page = find_get_page(mapping, index);
|
|
|
-
|
|
|
- if (page) {
|
|
|
- if (trylock_page(page))
|
|
|
- return page;
|
|
|
- page_cache_release(page);
|
|
|
- return NULL;
|
|
|
- }
|
|
|
- page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
|
|
|
- if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) {
|
|
|
- page_cache_release(page);
|
|
|
- page = NULL;
|
|
|
- }
|
|
|
- return page;
|
|
|
-}
|
|
|
-EXPORT_SYMBOL(grab_cache_page_nowait);
|
|
|
-
|
|
|
/*
|
|
|
* CD/DVDs are error prone. When a medium error occurs, the driver may fail
|
|
|
* a _large_ part of the i/o request. Imagine the worst scenario:
|
|
@@ -2406,7 +2372,6 @@ int pagecache_write_end(struct file *file, struct address_space *mapping,
|
|
|
{
|
|
|
const struct address_space_operations *aops = mapping->a_ops;
|
|
|
|
|
|
- mark_page_accessed(page);
|
|
|
return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
|
|
|
}
|
|
|
EXPORT_SYMBOL(pagecache_write_end);
|
|
@@ -2488,34 +2453,18 @@ EXPORT_SYMBOL(generic_file_direct_write);
|
|
|
struct page *grab_cache_page_write_begin(struct address_space *mapping,
|
|
|
pgoff_t index, unsigned flags)
|
|
|
{
|
|
|
- int status;
|
|
|
- gfp_t gfp_mask;
|
|
|
struct page *page;
|
|
|
- gfp_t gfp_notmask = 0;
|
|
|
+ int fgp_flags = FGP_LOCK|FGP_ACCESSED|FGP_WRITE|FGP_CREAT;
|
|
|
|
|
|
- gfp_mask = mapping_gfp_mask(mapping);
|
|
|
- if (mapping_cap_account_dirty(mapping))
|
|
|
- gfp_mask |= __GFP_WRITE;
|
|
|
if (flags & AOP_FLAG_NOFS)
|
|
|
- gfp_notmask = __GFP_FS;
|
|
|
-repeat:
|
|
|
- page = find_lock_page(mapping, index);
|
|
|
+ fgp_flags |= FGP_NOFS;
|
|
|
+
|
|
|
+ page = pagecache_get_page(mapping, index, fgp_flags,
|
|
|
+ mapping_gfp_mask(mapping),
|
|
|
+ GFP_KERNEL);
|
|
|
if (page)
|
|
|
- goto found;
|
|
|
+ wait_for_stable_page(page);
|
|
|
|
|
|
- page = __page_cache_alloc(gfp_mask & ~gfp_notmask);
|
|
|
- if (!page)
|
|
|
- return NULL;
|
|
|
- status = add_to_page_cache_lru(page, mapping, index,
|
|
|
- GFP_KERNEL & ~gfp_notmask);
|
|
|
- if (unlikely(status)) {
|
|
|
- page_cache_release(page);
|
|
|
- if (status == -EEXIST)
|
|
|
- goto repeat;
|
|
|
- return NULL;
|
|
|
- }
|
|
|
-found:
|
|
|
- wait_for_stable_page(page);
|
|
|
return page;
|
|
|
}
|
|
|
EXPORT_SYMBOL(grab_cache_page_write_begin);
|
|
@@ -2564,7 +2513,7 @@ again:
|
|
|
|
|
|
status = a_ops->write_begin(file, mapping, pos, bytes, flags,
|
|
|
&page, &fsdata);
|
|
|
- if (unlikely(status))
|
|
|
+ if (unlikely(status < 0))
|
|
|
break;
|
|
|
|
|
|
if (mapping_writably_mapped(mapping))
|
|
@@ -2573,7 +2522,6 @@ again:
|
|
|
copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
|
|
|
flush_dcache_page(page);
|
|
|
|
|
|
- mark_page_accessed(page);
|
|
|
status = a_ops->write_end(file, mapping, pos, bytes, copied,
|
|
|
page, fsdata);
|
|
|
if (unlikely(status < 0))
|