|
@@ -91,6 +91,7 @@
|
|
|
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/zsmalloc.h>
#include <linux/zpool.h>
|
|
|
|
|
@@ -168,6 +169,22 @@ enum fullness_group {
|
|
|
ZS_FULL
|
|
|
};
|
|
|
|
|
|
/*
 * Per-size_class object statistics, tracked only when
 * CONFIG_ZSMALLOC_STAT is enabled.
 *
 * OBJ_ALLOCATED: object slots carved out of zspages (whether in use
 *                or still on the freelist).
 * OBJ_USED:      object slots currently handed out to callers.
 */
enum zs_stat_type {
	OBJ_ALLOCATED,
	OBJ_USED,
	NR_ZS_STAT_TYPE,	/* number of counters; must stay last */
};

#ifdef CONFIG_ZSMALLOC_STAT

/* Root of the zsmalloc debugfs hierarchy: <debugfs>/zsmalloc/ */
static struct dentry *zs_stat_root;

/* Counter array embedded in each size_class (see zs_stat_inc/dec/get). */
struct zs_size_stat {
	unsigned long objs[NR_ZS_STAT_TYPE];
};

#endif
|
|
|
+
|
|
|
/*
|
|
|
* number of size_classes
|
|
|
*/
|
|
@@ -200,6 +217,10 @@ struct size_class {
|
|
|
/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
|
|
|
int pages_per_zspage;
|
|
|
|
|
|
+#ifdef CONFIG_ZSMALLOC_STAT
|
|
|
+ struct zs_size_stat stats;
|
|
|
+#endif
|
|
|
+
|
|
|
spinlock_t lock;
|
|
|
|
|
|
struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
|
|
@@ -217,10 +238,16 @@ struct link_free {
|
|
|
};
|
|
|
|
|
|
struct zs_pool {
	/* kstrdup'd copy of the caller-supplied pool name; freed on destroy */
	char *name;

	struct size_class **size_class;

	gfp_t flags;	/* allocation flags used when growing pool */
	atomic_long_t pages_allocated;

#ifdef CONFIG_ZSMALLOC_STAT
	/* this pool's directory under zs_stat_root */
	struct dentry *stat_dentry;
#endif
};
|
|
|
|
|
|
/*
|
|
@@ -942,6 +969,166 @@ static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
|
|
|
return true;
|
|
|
}
|
|
|
|
|
|
+#ifdef CONFIG_ZSMALLOC_STAT
|
|
|
+
|
|
|
/* Bump a per-class counter by @cnt; callers hold class->lock. */
static inline void zs_stat_inc(struct size_class *class,
				enum zs_stat_type type, unsigned long cnt)
{
	class->stats.objs[type] += cnt;
}
|
|
|
+
|
|
|
/* Drop a per-class counter by @cnt; callers hold class->lock. */
static inline void zs_stat_dec(struct size_class *class,
				enum zs_stat_type type, unsigned long cnt)
{
	class->stats.objs[type] -= cnt;
}
|
|
|
+
|
|
|
/* Read a per-class counter; callers hold class->lock for a stable value. */
static inline unsigned long zs_stat_get(struct size_class *class,
				enum zs_stat_type type)
{
	return class->stats.objs[type];
}
|
|
|
+
|
|
|
/*
 * Create the <debugfs>/zsmalloc root directory.
 *
 * Returns 0 on success, -ENODEV when debugfs is not available, or
 * -ENOMEM when the directory cannot be created.
 */
static int __init zs_stat_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
	if (!zs_stat_root)
		return -ENOMEM;

	return 0;
}
|
|
|
+
|
|
|
/*
 * Remove the whole zsmalloc debugfs tree.
 * debugfs_remove_recursive() is a no-op on NULL, so this is safe even
 * if zs_stat_init() never ran or failed.
 */
static void __exit zs_stat_exit(void)
{
	debugfs_remove_recursive(zs_stat_root);
}
|
|
|
+
|
|
|
/*
 * seq_file show handler for <debugfs>/zsmalloc/<pool>/obj_in_classes.
 *
 * Prints one row per size class (allocated objects, used objects and
 * pages consumed), followed by a totals row.
 */
static int zs_stats_size_show(struct seq_file *s, void *v)
{
	int i;
	struct zs_pool *pool = s->private;
	struct size_class *class;
	int objs_per_zspage;
	unsigned long obj_allocated, obj_used, pages_used;
	unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;

	seq_printf(s, " %5s %5s %13s %10s %10s\n", "class", "size",
			"obj_allocated", "obj_used", "pages_used");

	for (i = 0; i < zs_size_classes; i++) {
		class = pool->size_class[i];

		/* merged classes share one entry; report each class once */
		if (class->index != i)
			continue;

		/* snapshot both counters atomically w.r.t. alloc/free */
		spin_lock(&class->lock);
		obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
		obj_used = zs_stat_get(class, OBJ_USED);
		spin_unlock(&class->lock);

		objs_per_zspage = get_maxobj_per_zspage(class->size,
				class->pages_per_zspage);
		/*
		 * NOTE(review): truncating division — counts only fully
		 * populated zspages; a trailing partial zspage's pages are
		 * not reported. Confirm this is the intended accounting.
		 */
		pages_used = obj_allocated / objs_per_zspage *
				class->pages_per_zspage;

		seq_printf(s, " %5u %5u %10lu %10lu %10lu\n", i,
			class->size, obj_allocated, obj_used, pages_used);

		total_objs += obj_allocated;
		total_used_objs += obj_used;
		total_pages += pages_used;
	}

	seq_puts(s, "\n");
	seq_printf(s, " %5s %5s %10lu %10lu %10lu\n", "Total", "",
			total_objs, total_used_objs, total_pages);

	return 0;
}
|
|
|
+
|
|
|
/* open(): bind zs_stats_size_show to the zs_pool stashed in i_private. */
static int zs_stats_size_open(struct inode *inode, struct file *file)
{
	return single_open(file, zs_stats_size_show, inode->i_private);
}
|
|
|
+
|
|
|
/* fops for the read-only "obj_in_classes" debugfs file (seq_file based). */
static const struct file_operations zs_stat_size_ops = {
	.open = zs_stats_size_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
|
|
|
+
|
|
|
+static int zs_pool_stat_create(char *name, struct zs_pool *pool)
|
|
|
+{
|
|
|
+ struct dentry *entry;
|
|
|
+
|
|
|
+ if (!zs_stat_root)
|
|
|
+ return -ENODEV;
|
|
|
+
|
|
|
+ entry = debugfs_create_dir(name, zs_stat_root);
|
|
|
+ if (!entry) {
|
|
|
+ pr_warn("debugfs dir <%s> creation failed\n", name);
|
|
|
+ return -ENOMEM;
|
|
|
+ }
|
|
|
+ pool->stat_dentry = entry;
|
|
|
+
|
|
|
+ entry = debugfs_create_file("obj_in_classes", S_IFREG | S_IRUGO,
|
|
|
+ pool->stat_dentry, pool, &zs_stat_size_ops);
|
|
|
+ if (!entry) {
|
|
|
+ pr_warn("%s: debugfs file entry <%s> creation failed\n",
|
|
|
+ name, "obj_in_classes");
|
|
|
+ return -ENOMEM;
|
|
|
+ }
|
|
|
+
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
/*
 * Remove the pool's debugfs directory and everything in it.
 * NULL-safe: fine to call when zs_pool_stat_create() failed or never ran.
 */
static void zs_pool_stat_destroy(struct zs_pool *pool)
{
	debugfs_remove_recursive(pool->stat_dentry);
}
|
|
|
+
|
|
|
#else /* CONFIG_ZSMALLOC_STAT */

/*
 * No-op stubs so callers need no #ifdefs when statistics support is
 * compiled out; every stub compiles away entirely.
 */

static inline void zs_stat_inc(struct size_class *class,
				enum zs_stat_type type, unsigned long cnt)
{
}

static inline void zs_stat_dec(struct size_class *class,
				enum zs_stat_type type, unsigned long cnt)
{
}

static inline unsigned long zs_stat_get(struct size_class *class,
				enum zs_stat_type type)
{
	return 0;
}

static int __init zs_stat_init(void)
{
	return 0;
}

static void __exit zs_stat_exit(void)
{
}

static inline int zs_pool_stat_create(char *name, struct zs_pool *pool)
{
	return 0;
}

static inline void zs_pool_stat_destroy(struct zs_pool *pool)
{
}

#endif
|
|
|
+
|
|
|
unsigned long zs_get_total_pages(struct zs_pool *pool)
|
|
|
{
|
|
|
return atomic_long_read(&pool->pages_allocated);
|
|
@@ -1074,7 +1261,10 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
|
|
|
set_zspage_mapping(first_page, class->index, ZS_EMPTY);
|
|
|
atomic_long_add(class->pages_per_zspage,
|
|
|
&pool->pages_allocated);
|
|
|
+
|
|
|
spin_lock(&class->lock);
|
|
|
+ zs_stat_inc(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
|
|
|
+ class->size, class->pages_per_zspage));
|
|
|
}
|
|
|
|
|
|
obj = (unsigned long)first_page->freelist;
|
|
@@ -1088,6 +1278,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
|
|
|
kunmap_atomic(vaddr);
|
|
|
|
|
|
first_page->inuse++;
|
|
|
+ zs_stat_inc(class, OBJ_USED, 1);
|
|
|
/* Now move the zspage to another fullness group, if required */
|
|
|
fix_fullness_group(pool, first_page);
|
|
|
spin_unlock(&class->lock);
|
|
@@ -1128,6 +1319,12 @@ void zs_free(struct zs_pool *pool, unsigned long obj)
|
|
|
|
|
|
first_page->inuse--;
|
|
|
fullness = fix_fullness_group(pool, first_page);
|
|
|
+
|
|
|
+ zs_stat_dec(class, OBJ_USED, 1);
|
|
|
+ if (fullness == ZS_EMPTY)
|
|
|
+ zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
|
|
|
+ class->size, class->pages_per_zspage));
|
|
|
+
|
|
|
spin_unlock(&class->lock);
|
|
|
|
|
|
if (fullness == ZS_EMPTY) {
|
|
@@ -1158,9 +1355,16 @@ struct zs_pool *zs_create_pool(char *name, gfp_t flags)
|
|
|
if (!pool)
|
|
|
return NULL;
|
|
|
|
|
|
+ pool->name = kstrdup(name, GFP_KERNEL);
|
|
|
+ if (!pool->name) {
|
|
|
+ kfree(pool);
|
|
|
+ return NULL;
|
|
|
+ }
|
|
|
+
|
|
|
pool->size_class = kcalloc(zs_size_classes, sizeof(struct size_class *),
|
|
|
GFP_KERNEL);
|
|
|
if (!pool->size_class) {
|
|
|
+ kfree(pool->name);
|
|
|
kfree(pool);
|
|
|
return NULL;
|
|
|
}
|
|
@@ -1210,6 +1414,9 @@ struct zs_pool *zs_create_pool(char *name, gfp_t flags)
|
|
|
|
|
|
pool->flags = flags;
|
|
|
|
|
|
+ if (zs_pool_stat_create(name, pool))
|
|
|
+ goto err;
|
|
|
+
|
|
|
return pool;
|
|
|
|
|
|
err:
|
|
@@ -1222,6 +1429,8 @@ void zs_destroy_pool(struct zs_pool *pool)
|
|
|
{
|
|
|
int i;
|
|
|
|
|
|
+ zs_pool_stat_destroy(pool);
|
|
|
+
|
|
|
for (i = 0; i < zs_size_classes; i++) {
|
|
|
int fg;
|
|
|
struct size_class *class = pool->size_class[i];
|
|
@@ -1242,6 +1451,7 @@ void zs_destroy_pool(struct zs_pool *pool)
|
|
|
}
|
|
|
|
|
|
kfree(pool->size_class);
|
|
|
+ kfree(pool->name);
|
|
|
kfree(pool);
|
|
|
}
|
|
|
EXPORT_SYMBOL_GPL(zs_destroy_pool);
|
|
@@ -1250,17 +1460,30 @@ static int __init zs_init(void)
|
|
|
{
|
|
|
int ret = zs_register_cpu_notifier();
|
|
|
|
|
|
- if (ret) {
|
|
|
- zs_unregister_cpu_notifier();
|
|
|
- return ret;
|
|
|
- }
|
|
|
+ if (ret)
|
|
|
+ goto notifier_fail;
|
|
|
|
|
|
init_zs_size_classes();
|
|
|
|
|
|
#ifdef CONFIG_ZPOOL
|
|
|
zpool_register_driver(&zs_zpool_driver);
|
|
|
#endif
|
|
|
+
|
|
|
+ ret = zs_stat_init();
|
|
|
+ if (ret) {
|
|
|
+ pr_err("zs stat initialization failed\n");
|
|
|
+ goto stat_fail;
|
|
|
+ }
|
|
|
return 0;
|
|
|
+
|
|
|
+stat_fail:
|
|
|
+#ifdef CONFIG_ZPOOL
|
|
|
+ zpool_unregister_driver(&zs_zpool_driver);
|
|
|
+#endif
|
|
|
+notifier_fail:
|
|
|
+ zs_unregister_cpu_notifier();
|
|
|
+
|
|
|
+ return ret;
|
|
|
}
|
|
|
|
|
|
static void __exit zs_exit(void)
|
|
@@ -1269,6 +1492,8 @@ static void __exit zs_exit(void)
|
|
|
zpool_unregister_driver(&zs_zpool_driver);
|
|
|
#endif
|
|
|
zs_unregister_cpu_notifier();
|
|
|
+
|
|
|
+ zs_stat_exit();
|
|
|
}
|
|
|
|
|
|
module_init(zs_init);
|