@@ -2,6 +2,7 @@
  * Compressed RAM block device
  *
  * Copyright (C) 2008, 2009, 2010 Nitin Gupta
+ *               2012, 2013 Minchan Kim
  *
  * This code is released using a dual license strategy: BSD/GPL
  * You can choose the licence that better fits your requirements.
@@ -9,7 +10,6 @@
  * Released under the terms of 3-clause BSD License
  * Released under the terms of GNU General Public License Version 2.0
  *
- * Project home: http://compcache.googlecode.com
  */

 #define KMSG_COMPONENT "zram"
@@ -104,7 +104,7 @@ static ssize_t zero_pages_show(struct device *dev,
 {
	struct zram *zram = dev_to_zram(dev);

-	return sprintf(buf, "%u\n", zram->stats.pages_zero);
+	return sprintf(buf, "%u\n", atomic_read(&zram->stats.pages_zero));
 }

 static ssize_t orig_data_size_show(struct device *dev,
@@ -113,7 +113,7 @@ static ssize_t orig_data_size_show(struct device *dev,
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
-		(u64)(zram->stats.pages_stored) << PAGE_SHIFT);
+		(u64)(atomic_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
 }

 static ssize_t compr_data_size_show(struct device *dev,
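
The sysfs readers above now go through atomic_read(), which implies the stat fields (pages_zero, pages_stored, and friends) become atomic_t on the zram_drv.h side, a change not visible in this excerpt. The pattern itself is just the stock kernel atomic API; a minimal sketch with a stand-in counter:

	#include <linux/kernel.h>
	#include <linux/atomic.h>

	static atomic_t pages_zero = ATOMIC_INIT(0);

	static void account_zero_page(void)
	{
		atomic_inc(&pages_zero);	/* update side: no lock needed */
	}

	static ssize_t show_zero_pages(char *buf)
	{
		/* read side, as in zero_pages_show() above */
		return sprintf(buf, "%u\n", atomic_read(&pages_zero));
	}

Per-counter atomicity is what later allows the I/O paths to stop relying on the coarse zram->lock for stats consistency.
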
@@ -140,6 +140,7 @@ static ssize_t mem_used_total_show(struct device *dev,
	return sprintf(buf, "%llu\n", val);
 }

+/* flag operations need meta->tb_lock */
 static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
 {
@@ -228,6 +229,8 @@ static struct zram_meta *zram_meta_alloc(u64 disksize)
		goto free_table;
	}

+	rwlock_init(&meta->tb_lock);
+	mutex_init(&meta->buffer_lock);
	return meta;

 free_table:
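
The two initializers above pair with new lock fields in struct zram_meta, declared in zram_drv.h and therefore not visible in this diff. A sketch of the layout they assume (the non-lock members are the pre-existing ones; illustrative, not authoritative):

	#include <linux/spinlock.h>	/* rwlock_t */
	#include <linux/mutex.h>

	struct zram_meta {
		rwlock_t tb_lock;	/* protects table[] entries and their flags */
		void *compress_workmem;
		void *compress_buffer;
		struct table *table;
		struct zs_pool *mem_pool;
		struct mutex buffer_lock; /* protects compress_buffer */
	};

tb_lock is a spinning rwlock rather than a sleeping lock so that it can also be taken from the swap slot-free notification path further down, which is exactly why the old code had to defer frees to a workqueue.
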
@@ -280,6 +283,7 @@ static void handle_zero_page(struct bio_vec *bvec)
	flush_dcache_page(page);
 }

+/* NOTE: caller should hold meta->tb_lock for writing */
 static void zram_free_page(struct zram *zram, size_t index)
 {
	struct zram_meta *meta = zram->meta;
@@ -293,21 +297,21 @@ static void zram_free_page(struct zram *zram, size_t index)
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
-			zram->stats.pages_zero--;
+			atomic_dec(&zram->stats.pages_zero);
		}
		return;
	}

	if (unlikely(size > max_zpage_size))
-		zram->stats.bad_compress--;
+		atomic_dec(&zram->stats.bad_compress);

	zs_free(meta->mem_pool, handle);

	if (size <= PAGE_SIZE / 2)
-		zram->stats.good_compress--;
+		atomic_dec(&zram->stats.good_compress);

	atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
-	zram->stats.pages_stored--;
+	atomic_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	meta->table[index].size = 0;
@@ -319,20 +323,26 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
	size_t clen = PAGE_SIZE;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
-	unsigned long handle = meta->table[index].handle;
+	unsigned long handle;
+	u16 size;
+
+	read_lock(&meta->tb_lock);
+	handle = meta->table[index].handle;
+	size = meta->table[index].size;

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
+		read_unlock(&meta->tb_lock);
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
-	if (meta->table[index].size == PAGE_SIZE)
+	if (size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
-		ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
-						mem, &clen);
+		ret = lzo1x_decompress_safe(cmem, size, mem, &clen);
	zs_unmap_object(meta->mem_pool, handle);
+	read_unlock(&meta->tb_lock);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
@@ -353,11 +363,14 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

+	read_lock(&meta->tb_lock);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
+		read_unlock(&meta->tb_lock);
		handle_zero_page(bvec);
		return 0;
	}
+	read_unlock(&meta->tb_lock);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
@@ -400,6 +413,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
+	bool locked = false;

	page = bvec->bv_page;
	src = meta->compress_buffer;
@@ -419,6 +433,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
		goto out;
	}

+	mutex_lock(&meta->buffer_lock);
+	locked = true;
	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
@@ -433,25 +449,18 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
	if (page_zero_filled(uncmem)) {
		kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
+		write_lock(&zram->meta->tb_lock);
		zram_free_page(zram, index);
-
-		zram->stats.pages_zero++;
		zram_set_flag(meta, index, ZRAM_ZERO);
+		write_unlock(&zram->meta->tb_lock);
+
+		atomic_inc(&zram->stats.pages_zero);
		ret = 0;
		goto out;
	}

-	/*
-	 * zram_slot_free_notify could miss free so that let's
-	 * double check.
-	 */
-	if (unlikely(meta->table[index].handle ||
-			zram_test_flag(meta, index, ZRAM_ZERO)))
-		zram_free_page(zram, index);
-
	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
			       meta->compress_workmem);
-
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
@@ -464,7 +473,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
	}

	if (unlikely(clen > max_zpage_size)) {
-		zram->stats.bad_compress++;
+		atomic_inc(&zram->stats.bad_compress);
		clen = PAGE_SIZE;
		src = NULL;
		if (is_partial_io(bvec))
@@ -494,18 +503,22 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
+	write_lock(&zram->meta->tb_lock);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	meta->table[index].size = clen;
+	write_unlock(&zram->meta->tb_lock);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_size);
-	zram->stats.pages_stored++;
+	atomic_inc(&zram->stats.pages_stored);
	if (clen <= PAGE_SIZE / 2)
-		zram->stats.good_compress++;
+		atomic_inc(&zram->stats.good_compress);

 out:
+	if (locked)
+		mutex_unlock(&meta->buffer_lock);
	if (is_partial_io(bvec))
		kfree(uncmem);

@@ -514,36 +527,15 @@ out:
	return ret;
 }

-static void handle_pending_slot_free(struct zram *zram)
-{
-	struct zram_slot_free *free_rq;
-
-	spin_lock(&zram->slot_free_lock);
-	while (zram->slot_free_rq) {
-		free_rq = zram->slot_free_rq;
-		zram->slot_free_rq = free_rq->next;
-		zram_free_page(zram, free_rq->index);
-		kfree(free_rq);
-	}
-	spin_unlock(&zram->slot_free_lock);
-}
-
 static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio, int rw)
 {
	int ret;

-	if (rw == READ) {
-		down_read(&zram->lock);
-		handle_pending_slot_free(zram);
+	if (rw == READ)
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
-		up_read(&zram->lock);
-	} else {
-		down_write(&zram->lock);
-		handle_pending_slot_free(zram);
+	else
		ret = zram_bvec_write(zram, bvec, index, offset);
-		up_write(&zram->lock);
-	}

	return ret;
 }
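
With handle_pending_slot_free() gone, zram_bvec_rw() takes no per-device lock at all: concurrency control moves entirely into the meta locks introduced earlier. The write side nests them in a fixed order: meta->buffer_lock (a mutex held across the whole compression step) outside, the tb_lock write side (held only around the table update) inside. Condensed from zram_bvec_write() above:

	mutex_lock(&meta->buffer_lock);		/* one writer at a time */
	/* ... compress the page into meta->compress_buffer ... */
	write_lock(&meta->tb_lock);		/* publish the new table entry */
	zram_free_page(zram, index);
	meta->table[index].handle = handle;
	meta->table[index].size = clen;
	write_unlock(&meta->tb_lock);
	mutex_unlock(&meta->buffer_lock);

Readers take only the tb_lock read side, so concurrent reads proceed in parallel and are excluded only for the brief table-update window.
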
@@ -553,8 +545,6 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
	size_t index;
	struct zram_meta *meta;

-	flush_work(&zram->free_work);
-
	down_write(&zram->init_lock);
	if (!zram->init_done) {
		up_write(&zram->init_lock);
@@ -762,40 +752,19 @@ error:
	bio_io_error(bio);
 }

-static void zram_slot_free(struct work_struct *work)
-{
-	struct zram *zram;
-
-	zram = container_of(work, struct zram, free_work);
-	down_write(&zram->lock);
-	handle_pending_slot_free(zram);
-	up_write(&zram->lock);
-}
-
-static void add_slot_free(struct zram *zram, struct zram_slot_free *free_rq)
-{
-	spin_lock(&zram->slot_free_lock);
-	free_rq->next = zram->slot_free_rq;
-	zram->slot_free_rq = free_rq;
-	spin_unlock(&zram->slot_free_lock);
-}
-
 static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
 {
	struct zram *zram;
-	struct zram_slot_free *free_rq;
+	struct zram_meta *meta;

	zram = bdev->bd_disk->private_data;
-	atomic64_inc(&zram->stats.notify_free);
-
-	free_rq = kmalloc(sizeof(struct zram_slot_free), GFP_ATOMIC);
-	if (!free_rq)
-		return;
+	meta = zram->meta;

-	free_rq->index = index;
-	add_slot_free(zram, free_rq);
-	schedule_work(&zram->free_work);
+	write_lock(&meta->tb_lock);
+	zram_free_page(zram, index);
+	write_unlock(&meta->tb_lock);
+	atomic64_inc(&zram->stats.notify_free);
 }

 static const struct block_device_operations zram_devops = {
@@ -839,13 +808,8 @@ static int create_device(struct zram *zram, int device_id)
 {
	int ret = -ENOMEM;

-	init_rwsem(&zram->lock);
	init_rwsem(&zram->init_lock);

-	INIT_WORK(&zram->free_work, zram_slot_free);
-	spin_lock_init(&zram->slot_free_lock);
-	zram->slot_free_rq = NULL;
-
	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",