@@ -940,18 +940,18 @@ static unsigned long scan_swap_map(struct swap_info_struct *si,
 
 }
 
-int get_swap_pages(int n_goal, bool cluster, swp_entry_t swp_entries[])
+int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)
 {
-	unsigned long nr_pages = cluster ? SWAPFILE_CLUSTER : 1;
+	unsigned long size = swap_entry_size(entry_size);
 	struct swap_info_struct *si, *next;
 	long avail_pgs;
 	int n_ret = 0;
 	int node;
 
 	/* Only single cluster request supported */
-	WARN_ON_ONCE(n_goal > 1 && cluster);
+	WARN_ON_ONCE(n_goal > 1 && size == SWAPFILE_CLUSTER);
 
-	avail_pgs = atomic_long_read(&nr_swap_pages) / nr_pages;
+	avail_pgs = atomic_long_read(&nr_swap_pages) / size;
 	if (avail_pgs <= 0)
 		goto noswap;
 
@@ -961,7 +961,7 @@ int get_swap_pages(int n_goal, bool cluster, swp_entry_t swp_entries[])
 	if (n_goal > avail_pgs)
 		n_goal = avail_pgs;
 
-	atomic_long_sub(n_goal * nr_pages, &nr_swap_pages);
+	atomic_long_sub(n_goal * size, &nr_swap_pages);
 
 	spin_lock(&swap_avail_lock);
 
@@ -988,14 +988,14 @@ start_over:
 			spin_unlock(&si->lock);
 			goto nextsi;
 		}
-		if (cluster) {
+		if (size == SWAPFILE_CLUSTER) {
 			if (!(si->flags & SWP_FILE))
 				n_ret = swap_alloc_cluster(si, swp_entries);
 		} else
 			n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
 						    n_goal, swp_entries);
 		spin_unlock(&si->lock);
-		if (n_ret || cluster)
+		if (n_ret || size == SWAPFILE_CLUSTER)
 			goto check_out;
 		pr_debug("scan_swap_map of si %d failed to find offset\n",
 			si->type);
@@ -1021,7 +1021,7 @@ nextsi:
 
 check_out:
 	if (n_ret < n_goal)
-		atomic_long_add((long)(n_goal - n_ret) * nr_pages,
+		atomic_long_add((long)(n_goal - n_ret) * size,
 				&nr_swap_pages);
 noswap:
 	return n_ret;
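
For reference, swap_entry_size() is a helper introduced elsewhere in this series and is not shown in the hunks above. A minimal sketch of such a helper, assuming huge swap entries are gated by the CONFIG_THP_SWAP option the series is built around:

/*
 * Sketch only, assuming CONFIG_THP_SWAP gates huge-entry support:
 * with THP swap enabled the caller's entry_size is used as-is;
 * without it every swap entry covers exactly one page, so size is
 * a compile-time constant 1 and the compiler can discard the
 * size == SWAPFILE_CLUSTER branches above entirely.
 */
#ifdef CONFIG_THP_SWAP
#define swap_entry_size(size)	(size)
#else
#define swap_entry_size(size)	1
#endif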
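
On the caller side, the bool cluster flag is replaced by the number of pages one swap entry should cover. A hypothetical call site, sketched on the assumption that a PMD-sized THP needs an entry of HPAGE_PMD_NR pages (alloc_swap_slot() is an illustrative name, not part of the patch):

/*
 * Sketch: allocate one swap entry for either a normal page or a
 * PMD-sized THP through the new get_swap_pages() signature.
 */
static swp_entry_t alloc_swap_slot(bool huge)
{
	swp_entry_t entry;

	/* entry_size: 1 page normally, HPAGE_PMD_NR for a huge entry */
	if (!get_swap_pages(1, &entry, huge ? HPAGE_PMD_NR : 1))
		entry.val = 0;	/* allocation failed */
	return entry;
}

Note that requesting n_goal > 1 together with a cluster-sized entry still trips the WARN_ON_ONCE() above: batched allocation stays single-page, so multi-entry requests must pass an entry_size of 1.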