@@ -153,6 +153,41 @@ static ssize_t mem_limit_store(struct device *dev,
 	return len;
 }
 
+static ssize_t mem_used_max_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	u64 val = 0;
+	struct zram *zram = dev_to_zram(dev);
+
+	down_read(&zram->init_lock);
+	if (init_done(zram))
+		val = atomic_long_read(&zram->stats.max_used_pages);
+	up_read(&zram->init_lock);
+
+	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
+}
+
+static ssize_t mem_used_max_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	int err;
+	unsigned long val;
+	struct zram *zram = dev_to_zram(dev);
+	struct zram_meta *meta = zram->meta;
+
+	err = kstrtoul(buf, 10, &val);
+	if (err || val != 0)
+		return -EINVAL;
+
+	down_read(&zram->init_lock);
+	if (init_done(zram))
+		atomic_long_set(&zram->stats.max_used_pages,
+				zs_get_total_pages(meta->mem_pool));
+	up_read(&zram->init_lock);
+
+	return len;
+}
+
 static ssize_t max_comp_streams_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t len)
 {
@@ -465,6 +500,21 @@ out_cleanup:
 	return ret;
 }
 
+static inline void update_used_max(struct zram *zram,
+					const unsigned long pages)
+{
+	unsigned long old_max, cur_max;
+
+	old_max = atomic_long_read(&zram->stats.max_used_pages);
+
+	do {
+		cur_max = old_max;
+		if (pages > cur_max)
+			old_max = atomic_long_cmpxchg(
+				&zram->stats.max_used_pages, cur_max, pages);
+	} while (old_max != cur_max);
+}
+
 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 			   int offset)
 {
@@ -476,6 +526,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	struct zram_meta *meta = zram->meta;
 	struct zcomp_strm *zstrm;
 	bool locked = false;
+	unsigned long alloced_pages;
 
 	page = bvec->bv_page;
 	if (is_partial_io(bvec)) {
@@ -545,13 +596,15 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		goto out;
 	}
 
-	if (zram->limit_pages &&
-		zs_get_total_pages(meta->mem_pool) > zram->limit_pages) {
+	alloced_pages = zs_get_total_pages(meta->mem_pool);
+	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
 		zs_free(meta->mem_pool, handle);
 		ret = -ENOMEM;
 		goto out;
 	}
 
+	update_used_max(zram, alloced_pages);
+
 	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
 
 	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
@@ -901,6 +954,8 @@ static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
 static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
 static DEVICE_ATTR(mem_limit, S_IRUGO | S_IWUSR, mem_limit_show,
 		mem_limit_store);
+static DEVICE_ATTR(mem_used_max, S_IRUGO | S_IWUSR, mem_used_max_show,
+		mem_used_max_store);
 static DEVICE_ATTR(max_comp_streams, S_IRUGO | S_IWUSR,
 		max_comp_streams_show, max_comp_streams_store);
 static DEVICE_ATTR(comp_algorithm, S_IRUGO | S_IWUSR,
@@ -930,6 +985,7 @@ static struct attribute *zram_disk_attrs[] = {
 	&dev_attr_compr_data_size.attr,
 	&dev_attr_mem_used_total.attr,
 	&dev_attr_mem_limit.attr,
+	&dev_attr_mem_used_max.attr,
 	&dev_attr_max_comp_streams.attr,
 	&dev_attr_comp_algorithm.attr,
 	NULL,
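
For reference, a minimal userspace sketch (not part of the patch) of how the new knob could be exercised; it assumes a zram device named zram0, so the path /sys/block/zram0/mem_used_max is an assumption. Per mem_used_max_show() above, a read returns the peak zsmalloc footprint in bytes, and per mem_used_max_store(), writing "0" resets the watermark to the current usage.

/*
 * Illustrative sketch only: read the mem_used_max peak value and then
 * reset it by writing "0". Assumes /sys/block/zram0/mem_used_max exists.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static const char attr[] = "/sys/block/zram0/mem_used_max";

int main(void)
{
	char buf[64];
	ssize_t n;
	int fd;

	/* Read the peak allocated size (bytes, newline-terminated). */
	fd = open(attr, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	close(fd);
	if (n < 0) {
		perror("read");
		return EXIT_FAILURE;
	}
	buf[n] = '\0';
	printf("peak zsmalloc usage: %s", buf);

	/* Reset the watermark to the current usage (requires root). */
	fd = open(attr, O_WRONLY);
	if (fd < 0) {
		perror("open for write");
		return EXIT_FAILURE;
	}
	if (write(fd, "0", 1) != 1) {
		perror("write");
		close(fd);
		return EXIT_FAILURE;
	}
	close(fd);
	return EXIT_SUCCESS;
}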