@@ -89,7 +89,7 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
 		page = lru_to_page(pages);
 		list_del(&page->lru);
 		if (add_to_page_cache_lru(page, mapping, page->index,
-				mapping_gfp_constraint(mapping, GFP_KERNEL))) {
+				readahead_gfp_mask(mapping))) {
 			read_cache_pages_invalidate_page(mapping, page);
 			continue;
 		}
@@ -108,7 +108,7 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
 EXPORT_SYMBOL(read_cache_pages);
 
 static int read_pages(struct address_space *mapping, struct file *filp,
-		struct list_head *pages, unsigned nr_pages)
+		struct list_head *pages, unsigned int nr_pages, gfp_t gfp)
 {
 	struct blk_plug plug;
 	unsigned page_idx;
@@ -126,10 +126,8 @@ static int read_pages(struct address_space *mapping, struct file *filp,
 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
 		struct page *page = lru_to_page(pages);
 		list_del(&page->lru);
-		if (!add_to_page_cache_lru(page, mapping, page->index,
-				mapping_gfp_constraint(mapping, GFP_KERNEL))) {
+		if (!add_to_page_cache_lru(page, mapping, page->index, gfp))
 			mapping->a_ops->readpage(filp, page);
-		}
 		put_page(page);
 	}
 	ret = 0;
@@ -159,6 +157,7 @@ int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
 	int page_idx;
 	int ret = 0;
 	loff_t isize = i_size_read(inode);
+	gfp_t gfp_mask = readahead_gfp_mask(mapping);
 
 	if (isize == 0)
 		goto out;
@@ -180,7 +179,7 @@ int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
 		if (page && !radix_tree_exceptional_entry(page))
 			continue;
 
-		page = page_cache_alloc_readahead(mapping);
+		page = __page_cache_alloc(gfp_mask);
 		if (!page)
 			break;
 		page->index = page_offset;
@@ -196,7 +195,7 @@ int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
 	 * will then handle the error.
 	 */
 	if (ret)
-		read_pages(mapping, filp, &page_pool, ret);
+		read_pages(mapping, filp, &page_pool, ret, gfp_mask);
 	BUG_ON(!list_empty(&page_pool));
 out:
 	return ret;
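
For reference, the readahead_gfp_mask() helper that these hunks switch to is a static inline in include/linux/pagemap.h. A minimal sketch of what it is expected to look like, assuming it carries over the extra flags that page_cache_alloc_readahead() used (the mapping's gfp mask plus no-retry/no-warn allocation behaviour; the exact flag set may differ by kernel version):

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	/*
	 * Sketch, not the authoritative definition: obey the mapping's
	 * gfp mask, and since a failed readahead allocation is harmless,
	 * don't retry hard and don't warn on failure.
	 */
	return mapping_gfp_mask(x) |
		__GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
}

With this, __do_page_cache_readahead() computes the mask once, and both the page allocation (__page_cache_alloc()) and the later add_to_page_cache_lru() in read_pages() obey the mapping's GFP constraints instead of a bare GFP_KERNEL.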