@@ -293,7 +293,7 @@ static bool shmem_confirm_swap(struct address_space *mapping,
  */
 static int shmem_add_to_page_cache(struct page *page,
 				   struct address_space *mapping,
-				   pgoff_t index, gfp_t gfp, void *expected)
+				   pgoff_t index, void *expected)
 {
 	int error;
 
@@ -666,7 +666,7 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
 	 */
 	if (!error)
 		error = shmem_add_to_page_cache(*pagep, mapping, index,
-						GFP_NOWAIT, radswap);
+						radswap);
 	if (error != -ENOMEM) {
 		/*
 		 * Truncation and eviction use free_swap_and_cache(), which
@@ -1112,7 +1112,7 @@ repeat:
 						gfp & GFP_RECLAIM_MASK);
 		if (!error) {
 			error = shmem_add_to_page_cache(page, mapping, index,
-						gfp, swp_to_radix_entry(swap));
+						swp_to_radix_entry(swap));
 			/*
 			 * We already confirmed swap under page lock, and make
 			 * no memory allocation here, so usually no possibility
@@ -1175,7 +1175,7 @@ repeat:
 		error = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
 		if (!error) {
 			error = shmem_add_to_page_cache(page, mapping, index,
-							gfp, NULL);
+							NULL);
 			radix_tree_preload_end();
 		}
 		if (error) {
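Since no hunk touches the body of shmem_add_to_page_cache() beyond its prototype, the gfp parameter must have been unused there, so dropping it (and the GFP_NOWAIT / gfp arguments at the three call sites) is a pure cleanup with no behavioral change. The last hunk also shows why the function never needed a gfp mask of its own: the caller preloads the radix tree under its own gfp policy, and the insert then consumes the preloaded node. A minimal sketch of that call pattern, using only the function names visible in the diff (the wrapper name shmem_insert_page is hypothetical, not part of the patch):

	/*
	 * Hypothetical wrapper illustrating the pattern in the final hunk:
	 * any node allocation happens in the preload step, under the
	 * caller's gfp mask, so the insert itself is allocation-free.
	 */
	static int shmem_insert_page(struct page *page,
				     struct address_space *mapping,
				     pgoff_t index, gfp_t gfp)
	{
		int error;

		/* Allocation, if any, happens here under the caller's gfp. */
		error = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
		if (error)
			return error;

		/* No allocation here; a preloaded node is used if needed. */
		error = shmem_add_to_page_cache(page, mapping, index, NULL);
		radix_tree_preload_end();
		return error;
	}

This matches the comment preserved in the third hunk ("make no memory allocation here"): the swap-in path can rely on the insert succeeding or failing without entering reclaim, regardless of which gfp mask the caller used for the preload.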