@@ -183,19 +183,32 @@ static ssize_t comp_algorithm_store(struct device *dev,
 static int zram_test_flag(struct zram_meta *meta, u32 index,
 			enum zram_pageflags flag)
 {
-	return meta->table[index].flags & BIT(flag);
+	return meta->table[index].value & BIT(flag);
 }
 
 static void zram_set_flag(struct zram_meta *meta, u32 index,
 			enum zram_pageflags flag)
 {
-	meta->table[index].flags |= BIT(flag);
+	meta->table[index].value |= BIT(flag);
 }
 
 static void zram_clear_flag(struct zram_meta *meta, u32 index,
 			enum zram_pageflags flag)
 {
-	meta->table[index].flags &= ~BIT(flag);
+	meta->table[index].value &= ~BIT(flag);
+}
+
+static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
+{
+	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
+}
+
+static void zram_set_obj_size(struct zram_meta *meta,
+					u32 index, size_t size)
+{
+	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;
+
+	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
 }
 
 static inline int is_partial_io(struct bio_vec *bvec)
@@ -255,7 +268,6 @@ static struct zram_meta *zram_meta_alloc(u64 disksize)
 		goto free_table;
 	}
 
-	rwlock_init(&meta->tb_lock);
 	return meta;
 
 free_table:
@@ -304,7 +316,12 @@ static void handle_zero_page(struct bio_vec *bvec)
 	flush_dcache_page(page);
 }
 
-/* NOTE: caller should hold meta->tb_lock with write-side */
+
+/*
+ * To protect concurrent access to the same index entry, the
+ * caller should hold this table index entry's bit_spinlock to
+ * indicate that this index entry is being accessed.
+ */
 static void zram_free_page(struct zram *zram, size_t index)
 {
 	struct zram_meta *meta = zram->meta;
@@ -324,11 +341,12 @@ static void zram_free_page(struct zram *zram, size_t index)
 
 	zs_free(meta->mem_pool, handle);
 
-	atomic64_sub(meta->table[index].size, &zram->stats.compr_data_size);
+	atomic64_sub(zram_get_obj_size(meta, index),
+			&zram->stats.compr_data_size);
 	atomic64_dec(&zram->stats.pages_stored);
 
 	meta->table[index].handle = 0;
-	meta->table[index].size = 0;
+	zram_set_obj_size(meta, index, 0);
 }
 
 static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
@@ -339,12 +357,12 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 	unsigned long handle;
 	size_t size;
 
-	read_lock(&meta->tb_lock);
+	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
 	handle = meta->table[index].handle;
-	size = meta->table[index].size;
+	size = zram_get_obj_size(meta, index);
 
 	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
-		read_unlock(&meta->tb_lock);
+		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 		clear_page(mem);
 		return 0;
 	}
@@ -355,7 +373,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 	else
 		ret = zcomp_decompress(zram->comp, cmem, size, mem);
 	zs_unmap_object(meta->mem_pool, handle);
-	read_unlock(&meta->tb_lock);
+	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret)) {
@@ -376,14 +394,14 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 	struct zram_meta *meta = zram->meta;
 	page = bvec->bv_page;
 
-	read_lock(&meta->tb_lock);
+	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
 	if (unlikely(!meta->table[index].handle) ||
 			zram_test_flag(meta, index, ZRAM_ZERO)) {
-		read_unlock(&meta->tb_lock);
+		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 		handle_zero_page(bvec);
 		return 0;
 	}
-	read_unlock(&meta->tb_lock);
+	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 
 	if (is_partial_io(bvec))
 		/* Use a temporary buffer to decompress the page */
@@ -461,10 +479,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	if (page_zero_filled(uncmem)) {
 		kunmap_atomic(user_mem);
 		/* Free memory associated with this sector now. */
-		write_lock(&zram->meta->tb_lock);
+		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
 		zram_free_page(zram, index);
 		zram_set_flag(meta, index, ZRAM_ZERO);
-		write_unlock(&zram->meta->tb_lock);
+		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 
 		atomic64_inc(&zram->stats.zero_pages);
 		ret = 0;
@@ -514,12 +532,12 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	 * Free memory associated with this sector
 	 * before overwriting unused sectors.
 	 */
-	write_lock(&zram->meta->tb_lock);
+	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
 	zram_free_page(zram, index);
 
 	meta->table[index].handle = handle;
-	meta->table[index].size = clen;
-	write_unlock(&zram->meta->tb_lock);
+	zram_set_obj_size(meta, index, clen);
+	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 
 	/* Update stats */
 	atomic64_add(clen, &zram->stats.compr_data_size);
@@ -560,6 +578,7 @@ static void zram_bio_discard(struct zram *zram, u32 index,
 			     int offset, struct bio *bio)
 {
 	size_t n = bio->bi_iter.bi_size;
+	struct zram_meta *meta = zram->meta;
 
 	/*
 	 * zram manages data in physical block size units. Because logical block
@@ -580,13 +599,9 @@ static void zram_bio_discard(struct zram *zram, u32 index,
 	}
 
 	while (n >= PAGE_SIZE) {
-		/*
-		 * Discard request can be large so the lock hold times could be
-		 * lengthy. So take the lock once per page.
-		 */
-		write_lock(&zram->meta->tb_lock);
+		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
 		zram_free_page(zram, index);
-		write_unlock(&zram->meta->tb_lock);
+		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 		index++;
 		n -= PAGE_SIZE;
 	}
@@ -821,9 +836,9 @@ static void zram_slot_free_notify(struct block_device *bdev,
 	zram = bdev->bd_disk->private_data;
 	meta = zram->meta;
 
-	write_lock(&meta->tb_lock);
+	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
 	zram_free_page(zram, index);
-	write_unlock(&meta->tb_lock);
+	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 	atomic64_inc(&zram->stats.notify_free);
 }
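
For reference, here is a minimal userspace sketch (not part of the patch) of the packing scheme the new zram_get_obj_size()/zram_set_obj_size() helpers rely on: the low ZRAM_FLAG_SHIFT bits of the per-entry "value" word hold the compressed object size, and the bits above hold the zram_pageflags. The ZRAM_FLAG_SHIFT value and the flag numbering below are placeholders chosen for the sketch, not the driver's real definitions, and the sketch omits the per-entry locking that the driver gets by running bit_spin_lock()/bit_spin_unlock() on the ZRAM_ACCESS flag bit of the same word.

#include <assert.h>
#include <stdio.h>

#define ZRAM_FLAG_SHIFT	16			/* assumed value for this sketch */
#define ZRAM_ZERO	(ZRAM_FLAG_SHIFT + 1)	/* flag bits live above the size field */

struct table_entry {
	unsigned long value;			/* (flags << ZRAM_FLAG_SHIFT) | size */
};

static size_t get_obj_size(struct table_entry *e)
{
	/* mask off the flag bits, keep only the size field */
	return e->value & ((1UL << ZRAM_FLAG_SHIFT) - 1);
}

static void set_obj_size(struct table_entry *e, size_t size)
{
	/* preserve the flag bits while overwriting the size field */
	unsigned long flags = e->value >> ZRAM_FLAG_SHIFT;

	e->value = (flags << ZRAM_FLAG_SHIFT) | size;
}

int main(void)
{
	struct table_entry e = { .value = 0 };

	e.value |= 1UL << ZRAM_ZERO;		/* set a flag bit */
	set_obj_size(&e, 2048);			/* store a compressed size */

	assert(get_obj_size(&e) == 2048);
	assert(e.value & (1UL << ZRAM_ZERO));	/* flag survives the size update */
	printf("size=%zu value=%#lx\n", get_obj_size(&e), e.value);
	return 0;
}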