@@ -53,6 +53,11 @@ static size_t huge_class_size;
 
 static void zram_free_page(struct zram *zram, size_t index);
 
+static int zram_slot_trylock(struct zram *zram, u32 index)
+{
+	return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].value);
+}
+
 static void zram_slot_lock(struct zram *zram, u32 index)
 {
 	bit_spin_lock(ZRAM_LOCK, &zram->table[index].value);
@@ -401,7 +406,6 @@ static ssize_t backing_dev_store(struct device *dev,
 		goto out;
 
 	reset_bdev(zram);
-	spin_lock_init(&zram->bitmap_lock);
 
 	zram->old_block_size = old_block_size;
 	zram->bdev = bdev;
@@ -445,29 +449,24 @@ out:
 
 static unsigned long get_entry_bdev(struct zram *zram)
 {
-	unsigned long entry;
-
-	spin_lock(&zram->bitmap_lock);
+	unsigned long blk_idx = 1;
+retry:
 	/* skip 0 bit to confuse zram.handle = 0 */
-	entry = find_next_zero_bit(zram->bitmap, zram->nr_pages, 1);
-	if (entry == zram->nr_pages) {
-		spin_unlock(&zram->bitmap_lock);
+	blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx);
+	if (blk_idx == zram->nr_pages)
 		return 0;
-	}
 
-	set_bit(entry, zram->bitmap);
-	spin_unlock(&zram->bitmap_lock);
+	if (test_and_set_bit(blk_idx, zram->bitmap))
+		goto retry;
 
-	return entry;
+	return blk_idx;
 }
 
 static void put_entry_bdev(struct zram *zram, unsigned long entry)
 {
 	int was_set;
 
-	spin_lock(&zram->bitmap_lock);
 	was_set = test_and_clear_bit(entry, zram->bitmap);
-	spin_unlock(&zram->bitmap_lock);
 	WARN_ON_ONCE(!was_set);
 }
 
@@ -888,9 +887,10 @@ static ssize_t debug_stat_show(struct device *dev,
 
 	down_read(&zram->init_lock);
 	ret = scnprintf(buf, PAGE_SIZE,
-			"version: %d\n%8llu\n",
+			"version: %d\n%8llu %8llu\n",
 			version,
-			(u64)atomic64_read(&zram->stats.writestall));
+			(u64)atomic64_read(&zram->stats.writestall),
+			(u64)atomic64_read(&zram->stats.miss_free));
 	up_read(&zram->init_lock);
 
 	return ret;
@@ -1402,10 +1402,14 @@ static void zram_slot_free_notify(struct block_device *bdev,
 
 	zram = bdev->bd_disk->private_data;
 
-	zram_slot_lock(zram, index);
+	atomic64_inc(&zram->stats.notify_free);
+	if (!zram_slot_trylock(zram, index)) {
+		atomic64_inc(&zram->stats.miss_free);
+		return;
+	}
+
 	zram_free_page(zram, index);
 	zram_slot_unlock(zram, index);
-	atomic64_inc(&zram->stats.notify_free);
 }
 
 static int zram_rw_page(struct block_device *bdev, sector_t sector,