@@ -412,14 +412,14 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
  * the swap entry is no longer in use.
  */
 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
-			struct vm_area_struct *vma, unsigned long addr)
+			struct vm_area_struct *vma, unsigned long addr, bool do_poll)
 {
 	bool page_was_allocated;
 	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
 			vma, addr, &page_was_allocated);
 
 	if (page_was_allocated)
-		swap_readpage(retpage);
+		swap_readpage(retpage, do_poll);
 
 	return retpage;
 }
@@ -496,11 +496,13 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	unsigned long start_offset, end_offset;
 	unsigned long mask;
 	struct blk_plug plug;
+	bool do_poll = true;
 
 	mask = swapin_nr_pages(offset) - 1;
 	if (!mask)
 		goto skip;
 
+	do_poll = false;
 	/* Read a page_cluster sized and aligned cluster around offset. */
 	start_offset = offset & ~mask;
 	end_offset = offset | mask;
@@ -511,7 +513,7 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	for (offset = start_offset; offset <= end_offset ; offset++) {
 		/* Ok, do the async read-ahead now */
 		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
-						gfp_mask, vma, addr);
+						gfp_mask, vma, addr, false);
 		if (!page)
 			continue;
 		if (offset != entry_offset && likely(!PageTransCompound(page)))
@@ -522,7 +524,7 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
 
 	lru_add_drain();	/* Push any new pages onto the LRU now */
 skip:
-	return read_swap_cache_async(entry, gfp_mask, vma, addr);
+	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
 }
 
 int init_swap_address_space(unsigned int type, unsigned long nr_pages)