@@ -49,6 +49,8 @@
 static u64 zswap_pool_total_size;
 /* The number of compressed pages currently stored in zswap */
 static atomic_t zswap_stored_pages = ATOMIC_INIT(0);
+/* The number of same-value filled pages currently stored in zswap */
+static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);
 
 /*
  * The statistics below are not protected from concurrent access for
@@ -116,6 +118,11 @@ module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
 static unsigned int zswap_max_pool_percent = 20;
 module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
 
+/* Enable/disable handling same-value filled pages (enabled by default) */
+static bool zswap_same_filled_pages_enabled = true;
+module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
+		   bool, 0644);
+
 /*********************************
 * data structures
 **********************************/
@@ -145,9 +152,10 @@ struct zswap_pool {
  *            be held while changing the refcount. Since the lock must
  *            be held, there is no reason to also make refcount atomic.
  * length - the length in bytes of the compressed page data. Needed during
- *          decompression
+ *          decompression. For a same-value filled page, length is 0.
  * pool - the zswap_pool the entry's data is in
  * handle - zpool allocation handle that stores the compressed page data
+ * value - the value with which a same-value filled page is filled
  */
 struct zswap_entry {
 	struct rb_node rbnode;
@@ -155,7 +163,10 @@ struct zswap_entry {
 	int refcount;
 	unsigned int length;
 	struct zswap_pool *pool;
-	unsigned long handle;
+	union {
+		unsigned long handle;
+		unsigned long value;
+	};
 };
 
 struct zswap_header {
@@ -320,8 +331,12 @@ static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
  */
 static void zswap_free_entry(struct zswap_entry *entry)
 {
-	zpool_free(entry->pool->zpool, entry->handle);
-	zswap_pool_put(entry->pool);
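+	/* a same-filled entry holds no zpool allocation or pool reference */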
+	if (!entry->length)
+		atomic_dec(&zswap_same_filled_pages);
+	else {
+		zpool_free(entry->pool->zpool, entry->handle);
+		zswap_pool_put(entry->pool);
+	}
 	zswap_entry_cache_free(entry);
 	atomic_dec(&zswap_stored_pages);
 	zswap_update_total_size();
@@ -953,6 +968,28 @@ static int zswap_shrink(void)
 	return ret;
 }
 
+static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
+{
+	unsigned int pos;
+	unsigned long *page;
+
+	page = (unsigned long *)ptr;
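+	/* a page is same-filled iff every word matches the first word */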
+	for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
+		if (page[pos] != page[0])
+			return 0;
+	}
+	*value = page[0];
+	return 1;
+}
+
+static void zswap_fill_page(void *ptr, unsigned long value)
+{
+	unsigned long *page;
+
+	page = (unsigned long *)ptr;
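+	/* write value into every word-sized slot of the page */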
+	memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
+}
+
 /*********************************
 * frontswap hooks
 **********************************/
@@ -965,7 +1002,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
 	struct crypto_comp *tfm;
 	int ret;
 	unsigned int dlen = PAGE_SIZE, len;
-	unsigned long handle;
+	unsigned long handle, value;
 	char *buf;
 	u8 *src, *dst;
 	struct zswap_header *zhdr;
@@ -993,6 +1030,19 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
 		goto reject;
 	}
 
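+	/* if the page is same-filled, store just the fill value */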
+	if (zswap_same_filled_pages_enabled) {
+		src = kmap_atomic(page);
+		if (zswap_is_page_same_filled(src, &value)) {
+			kunmap_atomic(src);
+			entry->offset = offset;
+			entry->length = 0;
+			entry->value = value;
+			atomic_inc(&zswap_same_filled_pages);
+			goto insert_entry;
+		}
+		kunmap_atomic(src);
+	}
+
 	/* if entry is successfully added, it keeps the reference */
 	entry->pool = zswap_pool_current_get();
 	if (!entry->pool) {
@@ -1037,6 +1087,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
 	entry->handle = handle;
 	entry->length = dlen;
 
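+/* same-filled pages jump straight here, skipping compression */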
+insert_entry:
 	/* map */
 	spin_lock(&tree->lock);
 	do {
@@ -1089,6 +1140,13 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
 	}
 	spin_unlock(&tree->lock);
 
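+	/* zero-length entries are same-filled; reconstruct the page directly */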
+	if (!entry->length) {
+		dst = kmap_atomic(page);
+		zswap_fill_page(dst, entry->value);
+		kunmap_atomic(dst);
+		goto freeentry;
+	}
+
 	/* decompress */
 	dlen = PAGE_SIZE;
 	src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
@@ -1101,6 +1159,7 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
 	zpool_unmap_handle(entry->pool->zpool, entry->handle);
 	BUG_ON(ret);
 
+freeentry:
 	spin_lock(&tree->lock);
 	zswap_entry_put(tree, entry);
 	spin_unlock(&tree->lock);
@@ -1209,6 +1268,8 @@ static int __init zswap_debugfs_init(void)
 			zswap_debugfs_root, &zswap_pool_total_size);
 	debugfs_create_atomic_t("stored_pages", S_IRUGO,
 			zswap_debugfs_root, &zswap_stored_pages);
+	debugfs_create_atomic_t("same_filled_pages", 0444,
+			zswap_debugfs_root, &zswap_same_filled_pages);
 
 	return 0;
 }
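
With this patch applied, the new knob and statistic are visible from
userspace. A quick way to exercise them (a hypothetical session, assuming
zswap is enabled and debugfs is mounted at /sys/kernel/debug):

	# cat /sys/module/zswap/parameters/same_filled_pages_enabled
	# cat /sys/kernel/debug/zswap/same_filled_pages
	# echo 0 > /sys/module/zswap/parameters/same_filled_pages_enabled

Note that disabling the parameter only stops new pages from being stored
as a bare fill value; zswap_frontswap_load() keys off entry->length alone,
so entries already stored in that form continue to load correctly.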