@@ -17,6 +17,7 @@
 #include <linux/blkdev.h>
 #include <linux/pagevec.h>
 #include <linux/migrate.h>
+#include <linux/vmalloc.h>
 
 #include <asm/pgtable.h>
 
@@ -32,15 +33,8 @@ static const struct address_space_operations swap_aops = {
 #endif
 };
 
-struct address_space swapper_spaces[MAX_SWAPFILES] = {
-	[0 ... MAX_SWAPFILES - 1] = {
-		.page_tree = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
-		.i_mmap_writable = ATOMIC_INIT(0),
-		.a_ops = &swap_aops,
-		/* swap cache doesn't use writeback related tags */
-		.flags = 1 << AS_NO_WRITEBACK_TAGS,
-	}
-};
+struct address_space *swapper_spaces[MAX_SWAPFILES];
+static unsigned int nr_swapper_spaces[MAX_SWAPFILES];
 
 #define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)
 
@@ -53,11 +47,26 @@ static struct {
 
 unsigned long total_swapcache_pages(void)
 {
-	int i;
+	unsigned int i, j, nr;
 	unsigned long ret = 0;
+	struct address_space *spaces;
 
-	for (i = 0; i < MAX_SWAPFILES; i++)
-		ret += swapper_spaces[i].nrpages;
+	rcu_read_lock();
+	for (i = 0; i < MAX_SWAPFILES; i++) {
+		/*
+		 * The corresponding entries in nr_swapper_spaces and
+		 * swapper_spaces are reused only after at least one
+		 * grace period, so the pair read here cannot belong
+		 * to two different swapon/swapoff cycles.
+		 */
+		nr = nr_swapper_spaces[i];
+		spaces = rcu_dereference(swapper_spaces[i]);
+		if (!nr || !spaces)
+			continue;
+		for (j = 0; j < nr; j++)
+			ret += spaces[j].nrpages;
+	}
+	rcu_read_unlock();
 	return ret;
 }
 
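With swapper_spaces[type] now pointing at an array of one address_space per SWAP_ADDRESS_SPACE_PAGES trunk, callers need a helper that maps a swap entry to its trunk. A minimal sketch of such a lookup, assuming SWAP_ADDRESS_SPACE_SHIFT is 14 (2^14 pages of 4KB each, i.e. 64MB per trunk) and the usual swp_type()/swp_offset() accessors; the names follow include/linux/swap.h conventions but are illustrative, not part of this hunk:

	/* Illustrative sketch, not part of this hunk. */
	#define SWAP_ADDRESS_SPACE_SHIFT	14
	#define SWAP_ADDRESS_SPACE_PAGES	(1 << SWAP_ADDRESS_SPACE_SHIFT)

	/* Select the per-64MB address space covering this swap entry. */
	static inline struct address_space *swap_address_space(swp_entry_t entry)
	{
		return &swapper_spaces[swp_type(entry)][swp_offset(entry)
				>> SWAP_ADDRESS_SPACE_SHIFT];
	}
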
@@ -505,3 +514,38 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
 skip:
 	return read_swap_cache_async(entry, gfp_mask, vma, addr);
 }
+
+int init_swap_address_space(unsigned int type, unsigned long nr_pages)
+{
+	struct address_space *spaces, *space;
+	unsigned int i, nr;
+
+	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
+	spaces = vzalloc(sizeof(struct address_space) * nr);
+	if (!spaces)
+		return -ENOMEM;
+	for (i = 0; i < nr; i++) {
+		space = spaces + i;
+		INIT_RADIX_TREE(&space->page_tree, GFP_ATOMIC|__GFP_NOWARN);
+		atomic_set(&space->i_mmap_writable, 0);
+		space->a_ops = &swap_aops;
+		/* swap cache doesn't use writeback related tags */
+		mapping_set_no_writeback_tags(space);
+		spin_lock_init(&space->tree_lock);
+	}
+	nr_swapper_spaces[type] = nr;
+	rcu_assign_pointer(swapper_spaces[type], spaces);
+
+	return 0;
+}
+
+void exit_swap_address_space(unsigned int type)
+{
+	struct address_space *spaces;
+
+	spaces = swapper_spaces[type];
+	nr_swapper_spaces[type] = 0;
+	rcu_assign_pointer(swapper_spaces[type], NULL);
+	synchronize_rcu();
+	kvfree(spaces);
+}
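
For completeness, a hedged sketch of the intended call sites: init_swap_address_space() is meant to run during swapon, once the maximum page count for the device is known, and exit_swap_address_space() during swapoff, after the swap cache for that type has been drained. The swap_info_struct usage below (p->type, maxpages, bad_swap) mirrors mm/swapfile.c conventions and is an assumption here, not part of this hunk:

	/* swapon path: allocate the per-64MB swap cache trunks. */
	error = init_swap_address_space(p->type, maxpages);
	if (error)
		goto bad_swap;

	/* swapoff path: tear the trunks down once the swap cache is empty. */
	exit_swap_address_space(p->type);

Note the teardown order in exit_swap_address_space(): the pointer is cleared with rcu_assign_pointer() and synchronize_rcu() waits out any reader such as total_swapcache_pages() before kvfree() releases the array, which is what makes the lockless reader side above safe.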