@@ -729,14 +729,15 @@ static ssize_t mm_stat_show(struct device *dev,
 	max_used = atomic_long_read(&zram->stats.max_used_pages);
 
 	ret = scnprintf(buf, PAGE_SIZE,
-			"%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n",
+			"%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu\n",
 			orig_size << PAGE_SHIFT,
 			(u64)atomic64_read(&zram->stats.compr_data_size),
 			mem_used << PAGE_SHIFT,
 			zram->limit_pages << PAGE_SHIFT,
 			max_used << PAGE_SHIFT,
 			(u64)atomic64_read(&zram->stats.same_pages),
-			pool_stats.pages_compacted);
+			pool_stats.pages_compacted,
+			(u64)atomic64_read(&zram->stats.huge_pages));
 	up_read(&zram->init_lock);
 
 	return ret;
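
The format string above appends an eighth mm_stat column, the number of
incompressible pages currently stored. A minimal userspace sketch that reads
it back (assumes a zram0 device; the path and column order follow the
scnprintf() format above, and the program itself is illustrative, not part
of the patch):

	/* Parse /sys/block/zram0/mm_stat and print the new 8th column. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long long orig, compr, used, limit, same, huge;
		unsigned long compacted;
		long long max_used;
		FILE *f = fopen("/sys/block/zram0/mm_stat", "r");

		if (!f)
			return 1;
		if (fscanf(f, "%llu %llu %llu %llu %lld %llu %lu %llu",
			   &orig, &compr, &used, &limit, &max_used,
			   &same, &compacted, &huge) == 8)
			printf("huge (incompressible) pages: %llu\n", huge);
		fclose(f);
		return 0;
	}
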
@@ -805,6 +806,11 @@ static void zram_free_page(struct zram *zram, size_t index)
 {
 	unsigned long handle;
 
+	if (zram_test_flag(zram, index, ZRAM_HUGE)) {
+		zram_clear_flag(zram, index, ZRAM_HUGE);
+		atomic64_dec(&zram->stats.huge_pages);
+	}
+
 	if (zram_wb_enabled(zram) && zram_test_flag(zram, index, ZRAM_WB)) {
 		zram_wb_clear(zram, index);
 		atomic64_dec(&zram->stats.pages_stored);
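
Clearing ZRAM_HUGE and decrementing the counter at the top of
zram_free_page(), ahead of the ZRAM_WB and same-page cases, keeps
stats.huge_pages balanced however the slot is torn down: every path that
frees or overwrites a slot passes through here first.
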
@@ -973,6 +979,7 @@ compress_again:
 	}
 
 	if (unlikely(comp_len >= huge_class_size)) {
+		comp_len = PAGE_SIZE;
 		if (zram_wb_enabled(zram) && allow_wb) {
 			zcomp_stream_put(zram->comp);
 			ret = write_to_bdev(zram, bvec, index, bio, &element);
@@ -984,7 +991,6 @@ compress_again:
 			allow_wb = false;
 			goto compress_again;
 		}
-		comp_len = PAGE_SIZE;
 	}
 
 	/*
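
Note the hoist: comp_len is forced to PAGE_SIZE before the writeback attempt
rather than after it, so when write_to_bdev() succeeds and the code jumps to
out: with flags == ZRAM_WB, the out: path (next hunk) still sees
comp_len == PAGE_SIZE and marks the slot ZRAM_HUGE. An incompressible page
is therefore counted whether it lands in zsmalloc or on the backing device.
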
@@ -1046,6 +1052,11 @@ out:
 	zram_slot_lock(zram, index);
 	zram_free_page(zram, index);
 
+	if (comp_len == PAGE_SIZE) {
+		zram_set_flag(zram, index, ZRAM_HUGE);
+		atomic64_inc(&zram->stats.huge_pages);
+	}
+
 	if (flags) {
 		zram_set_flag(zram, index, flags);
 		zram_set_element(zram, index, element);
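
Taken together, the store path sets ZRAM_HUGE and increments
stats.huge_pages exactly when a slot is (re)written with
comp_len == PAGE_SIZE, and zram_free_page() undoes both, so the counter
always equals the number of live incompressible slots. A toy userspace model
of that pairing (names and types are illustrative, not the kernel's):

	#include <assert.h>
	#include <stdbool.h>
	#include <stddef.h>

	#define NR_SLOTS	8
	#define MODEL_PAGE_SIZE	4096UL

	static bool slot_huge[NR_SLOTS];
	static long huge_pages;		/* models stats.huge_pages */

	/* Mirrors zram_free_page(): unmark and decrement if huge. */
	static void free_slot(size_t index)
	{
		if (slot_huge[index]) {
			slot_huge[index] = false;
			huge_pages--;
		}
	}

	/* Mirrors the out: path: old slot freed, then re-marked if huge. */
	static void store_slot(size_t index, unsigned long comp_len)
	{
		free_slot(index);
		if (comp_len == MODEL_PAGE_SIZE) {
			slot_huge[index] = true;
			huge_pages++;
		}
	}

	int main(void)
	{
		store_slot(0, 700);			/* compressible */
		store_slot(1, MODEL_PAGE_SIZE);		/* incompressible */
		store_slot(1, MODEL_PAGE_SIZE);		/* rewrite: dec, then inc */
		assert(huge_pages == 1);
		free_slot(1);
		assert(huge_pages == 0);
		return 0;
	}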