@@ -140,8 +140,8 @@ out:
 }
 
 /*
- * __do_page_cache_readahead() actually reads a chunk of disk. It allocates all
- * the pages first, then submits them all for I/O. This avoids the very bad
+ * __do_page_cache_readahead() actually reads a chunk of disk. It allocates
+ * the pages first, then submits them for I/O. This avoids the very bad
  * behaviour which would occur if page allocations are causing VM writeback.
  * We really don't want to intermingle reads and writes like that.
  *
@@ -177,8 +177,18 @@ unsigned int __do_page_cache_readahead(struct address_space *mapping,
 		rcu_read_lock();
 		page = radix_tree_lookup(&mapping->i_pages, page_offset);
 		rcu_read_unlock();
-		if (page && !radix_tree_exceptional_entry(page))
+		if (page && !radix_tree_exceptional_entry(page)) {
+			/*
+			 * Page already present? Kick off the current batch of
+			 * contiguous pages before continuing with the next
+			 * batch.
+			 */
+			if (nr_pages)
+				read_pages(mapping, filp, &page_pool, nr_pages,
+						gfp_mask);
+			nr_pages = 0;
 			continue;
+		}
 
 		page = __page_cache_alloc(gfp_mask);
 		if (!page)
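
To make the effect of the second hunk concrete, here is a minimal userspace sketch of the same batching pattern: walk a range of page indices, skip indices that are already cached, and flush the accumulated run of misses whenever a cached page interrupts it (and once more at the end). page_is_cached(), submit_batch(), NR_TO_READ and the fixed cached range are hypothetical stand-ins for the radix tree lookup and read_pages() in the kernel code; the point is only that a cached page ends the current run, so every submitted batch contains contiguous indices.

#include <stdbool.h>
#include <stdio.h>

#define NR_TO_READ 16

/* Hypothetical stand-in for the page cache lookup: indices 5..7 are cached. */
static bool page_is_cached(unsigned long index)
{
	return index >= 5 && index <= 7;
}

/* Hypothetical stand-in for read_pages(): submit one contiguous batch. */
static void submit_batch(unsigned long start, unsigned int nr_pages)
{
	printf("submit %u contiguous page(s) starting at index %lu\n",
	       nr_pages, start);
}

int main(void)
{
	unsigned long batch_start = 0;
	unsigned int nr_pages = 0;
	unsigned long index;

	for (index = 0; index < NR_TO_READ; index++) {
		if (page_is_cached(index)) {
			/* A cached page ends the current contiguous run. */
			if (nr_pages)
				submit_batch(batch_start, nr_pages);
			nr_pages = 0;
			continue;
		}

		/* First miss of a new run remembers where the batch starts. */
		if (!nr_pages)
			batch_start = index;
		nr_pages++;
	}

	/* Flush whatever is left, as the tail of the real function does. */
	if (nr_pages)
		submit_batch(batch_start, nr_pages);

	return 0;
}

With the example data this prints two batches (indices 0-4 and 8-15) instead of one call covering a range with a hole in the middle, which is the behaviour the patch wants ->readpages() implementations to see.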