@@ -74,6 +74,17 @@ static void zram_clear_flag(struct zram_meta *meta, u32 index,
 	meta->table[index].value &= ~BIT(flag);
 }
 
+static inline void zram_set_element(struct zram_meta *meta, u32 index,
+			unsigned long element)
+{
+	meta->table[index].element = element;
+}
+
+static inline void zram_clear_element(struct zram_meta *meta, u32 index)
+{
+	meta->table[index].element = 0;
+}
+
 static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
 {
 	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
@@ -146,31 +157,46 @@ static inline void update_used_max(struct zram *zram,
 	} while (old_max != cur_max);
 }
 
-static bool page_zero_filled(void *ptr)
+static inline void zram_fill_page(char *ptr, unsigned long len,
+					unsigned long value)
+{
+	int i;
+	unsigned long *page = (unsigned long *)ptr;
+
+	WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));
+
+	if (likely(value == 0)) {
+		memset(ptr, 0, len);
+	} else {
+		for (i = 0; i < len / sizeof(*page); i++)
+			page[i] = value;
+	}
+}
+
+static bool page_same_filled(void *ptr, unsigned long *element)
 {
 	unsigned int pos;
 	unsigned long *page;
 
 	page = (unsigned long *)ptr;
 
-	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
-		if (page[pos])
+	for (pos = 0; pos < PAGE_SIZE / sizeof(*page) - 1; pos++) {
+		if (page[pos] != page[pos + 1])
 			return false;
 	}
 
+	*element = page[pos];
+
 	return true;
 }
 
-static void handle_zero_page(struct bio_vec *bvec)
+static void handle_same_page(struct bio_vec *bvec, unsigned long element)
 {
 	struct page *page = bvec->bv_page;
 	void *user_mem;
 
 	user_mem = kmap_atomic(page);
-	if (is_partial_io(bvec))
-		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
-	else
-		clear_page(user_mem);
+	zram_fill_page(user_mem + bvec->bv_offset, bvec->bv_len, element);
 	kunmap_atomic(user_mem);
 
 	flush_dcache_page(page);
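
(Aside, not part of the patch: the hunk above is the heart of the change — a page now counts as "same filled" when every machine word equals its neighbour, and that repeated word is stored as the element so the page can be reconstructed without allocating zsmalloc memory. The standalone sketch below mirrors the detection/fill round trip in plain userspace C so it can be compiled and run; PAGE_SIZE is hard-coded to 4096 and the kernel helpers likely()/WARN_ON_ONCE() are dropped, so treat the file name and details as illustrative only, not kernel code.)

/* same_filled_demo.c - userspace illustration of the patch's logic */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL	/* assumed here; per-arch in the kernel */

/* mirrors page_same_filled(): true when all words match; the repeated
 * word is returned through *element */
static bool page_same_filled(void *ptr, unsigned long *element)
{
	unsigned int pos;
	unsigned long *page = (unsigned long *)ptr;

	for (pos = 0; pos < PAGE_SIZE / sizeof(*page) - 1; pos++) {
		if (page[pos] != page[pos + 1])
			return false;
	}
	*element = page[pos];
	return true;
}

/* mirrors zram_fill_page(): rebuild a page from the stored element */
static void zram_fill_page(char *ptr, unsigned long len, unsigned long value)
{
	unsigned long i;
	unsigned long *page = (unsigned long *)ptr;

	if (value == 0) {
		memset(ptr, 0, len);
	} else {
		for (i = 0; i < len / sizeof(*page); i++)
			page[i] = value;
	}
}

int main(void)
{
	unsigned long element, i;
	unsigned long *in = malloc(PAGE_SIZE);
	unsigned long *out = malloc(PAGE_SIZE);

	if (!in || !out)
		return 1;

	/* a page of repeated non-zero words: rejected by the old
	 * page_zero_filled(), caught by page_same_filled() */
	for (i = 0; i < PAGE_SIZE / sizeof(*in); i++)
		in[i] = 0xdeadbeefUL;

	if (page_same_filled(in, &element)) {
		zram_fill_page((char *)out, PAGE_SIZE, element);
		printf("element=%#lx round-trip=%s\n", element,
		       memcmp(in, out, PAGE_SIZE) ? "FAIL" : "ok");
	}
	free(in);
	free(out);
	return 0;
}
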
@@ -363,7 +389,7 @@ static ssize_t mm_stat_show(struct device *dev,
 			mem_used << PAGE_SHIFT,
 			zram->limit_pages << PAGE_SHIFT,
 			max_used << PAGE_SHIFT,
-			(u64)atomic64_read(&zram->stats.zero_pages),
+			(u64)atomic64_read(&zram->stats.same_pages),
 			pool_stats.pages_compacted);
 	up_read(&zram->init_lock);
 
@@ -391,18 +417,6 @@ static DEVICE_ATTR_RO(io_stat);
 static DEVICE_ATTR_RO(mm_stat);
 static DEVICE_ATTR_RO(debug_stat);
 
-static inline bool zram_meta_get(struct zram *zram)
-{
-	if (atomic_inc_not_zero(&zram->refcount))
-		return true;
-	return false;
-}
-
-static inline void zram_meta_put(struct zram *zram)
-{
-	atomic_dec(&zram->refcount);
-}
-
 static void zram_meta_free(struct zram_meta *meta, u64 disksize)
 {
 	size_t num_pages = disksize >> PAGE_SHIFT;
@@ -411,8 +425,11 @@ static void zram_meta_free(struct zram_meta *meta, u64 disksize)
 	/* Free all pages that are still in this zram device */
 	for (index = 0; index < num_pages; index++) {
 		unsigned long handle = meta->table[index].handle;
-
-		if (!handle)
+		/*
+		 * No memory is allocated for same element filled pages.
+		 * Simply clear same page flag.
+		 */
+		if (!handle || zram_test_flag(meta, index, ZRAM_SAME))
 			continue;
 
 		zs_free(meta->mem_pool, handle);
@@ -462,18 +479,20 @@ static void zram_free_page(struct zram *zram, size_t index)
 	struct zram_meta *meta = zram->meta;
 	unsigned long handle = meta->table[index].handle;
 
-	if (unlikely(!handle)) {
-		/*
-		 * No memory is allocated for zero filled pages.
-		 * Simply clear zero page flag.
-		 */
-		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
-			zram_clear_flag(meta, index, ZRAM_ZERO);
-			atomic64_dec(&zram->stats.zero_pages);
-		}
+	/*
+	 * No memory is allocated for same element filled pages.
+	 * Simply clear same page flag.
+	 */
+	if (zram_test_flag(meta, index, ZRAM_SAME)) {
+		zram_clear_flag(meta, index, ZRAM_SAME);
+		zram_clear_element(meta, index);
+		atomic64_dec(&zram->stats.same_pages);
 		return;
 	}
 
+	if (!handle)
+		return;
+
 	zs_free(meta->mem_pool, handle);
 
 	atomic64_sub(zram_get_obj_size(meta, index),
@@ -496,9 +515,9 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 	handle = meta->table[index].handle;
 	size = zram_get_obj_size(meta, index);
 
-	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
+	if (!handle || zram_test_flag(meta, index, ZRAM_SAME)) {
 		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
-		clear_page(mem);
+		zram_fill_page(mem, PAGE_SIZE, meta->table[index].element);
 		return 0;
 	}
 
@@ -534,9 +553,9 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 
 	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
 	if (unlikely(!meta->table[index].handle) ||
-			zram_test_flag(meta, index, ZRAM_ZERO)) {
+			zram_test_flag(meta, index, ZRAM_SAME)) {
 		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
-		handle_zero_page(bvec);
+		handle_same_page(bvec, meta->table[index].element);
 		return 0;
 	}
 	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
@@ -584,6 +603,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	struct zram_meta *meta = zram->meta;
 	struct zcomp_strm *zstrm = NULL;
 	unsigned long alloced_pages;
+	unsigned long element;
 
 	page = bvec->bv_page;
 	if (is_partial_io(bvec)) {
@@ -612,16 +632,17 @@ compress_again:
 		uncmem = user_mem;
 	}
 
-	if (page_zero_filled(uncmem)) {
+	if (page_same_filled(uncmem, &element)) {
 		if (user_mem)
 			kunmap_atomic(user_mem);
 		/* Free memory associated with this sector now. */
 		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
 		zram_free_page(zram, index);
-		zram_set_flag(meta, index, ZRAM_ZERO);
+		zram_set_flag(meta, index, ZRAM_SAME);
+		zram_set_element(meta, index, element);
 		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 
-		atomic64_inc(&zram->stats.zero_pages);
+		atomic64_inc(&zram->stats.same_pages);
 		ret = 0;
 		goto out;
 	}
@@ -859,22 +880,17 @@ static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
 {
 	struct zram *zram = queue->queuedata;
 
-	if (unlikely(!zram_meta_get(zram)))
-		goto error;
-
 	blk_queue_split(queue, &bio, queue->bio_split);
 
 	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
 					bio->bi_iter.bi_size)) {
 		atomic64_inc(&zram->stats.invalid_io);
-		goto put_zram;
+		goto error;
 	}
 
 	__zram_make_request(zram, bio);
-	zram_meta_put(zram);
 	return BLK_QC_T_NONE;
-put_zram:
-	zram_meta_put(zram);
+
 error:
 	bio_io_error(bio);
 	return BLK_QC_T_NONE;
@@ -904,13 +920,11 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
 	struct bio_vec bv;
 
 	zram = bdev->bd_disk->private_data;
-	if (unlikely(!zram_meta_get(zram)))
-		goto out;
 
 	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
 		atomic64_inc(&zram->stats.invalid_io);
 		err = -EINVAL;
-		goto put_zram;
+		goto out;
 	}
 
 	index = sector >> SECTORS_PER_PAGE_SHIFT;
@@ -921,8 +935,6 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
 	bv.bv_offset = 0;
 
 	err = zram_bvec_rw(zram, &bv, index, offset, is_write);
-put_zram:
-	zram_meta_put(zram);
 out:
 	/*
 	 * If I/O fails, just return error(ie, non-zero) without
@@ -955,17 +967,6 @@ static void zram_reset_device(struct zram *zram)
 	meta = zram->meta;
 	comp = zram->comp;
 	disksize = zram->disksize;
-	/*
-	 * Refcount will go down to 0 eventually and r/w handler
-	 * cannot handle further I/O so it will bail out by
-	 * check zram_meta_get.
-	 */
-	zram_meta_put(zram);
-	/*
-	 * We want to free zram_meta in process context to avoid
-	 * deadlock between reclaim path and any other locks.
-	 */
-	wait_event(zram->io_done, atomic_read(&zram->refcount) == 0);
 
 	/* Reset stats */
 	memset(&zram->stats, 0, sizeof(zram->stats));
@@ -1013,8 +1014,6 @@ static ssize_t disksize_store(struct device *dev,
 		goto out_destroy_comp;
 	}
 
-	init_waitqueue_head(&zram->io_done);
-	atomic_set(&zram->refcount, 1);
 	zram->meta = meta;
 	zram->comp = comp;
 	zram->disksize = disksize;