|
@@ -245,6 +245,7 @@ struct zs_pool {
|
|
|
gfp_t flags; /* allocation flags used when growing pool */
|
|
|
atomic_long_t pages_allocated;
|
|
|
|
|
|
+ struct zs_pool_stats stats;
|
|
|
#ifdef CONFIG_ZSMALLOC_STAT
|
|
|
struct dentry *stat_dentry;
|
|
|
#endif
|
|
@@ -1578,7 +1579,7 @@ struct zs_compact_control {
|
|
|
/* Starting object index within @s_page which used for live object
|
|
|
* in the subpage. */
|
|
|
int index;
|
|
|
- /* how many of objects are migrated */
|
|
|
+ /* How many objects were migrated */
|
|
|
int nr_migrated;
|
|
|
};
|
|
|
|
|
@@ -1590,7 +1591,6 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
|
|
|
struct page *s_page = cc->s_page;
|
|
|
struct page *d_page = cc->d_page;
|
|
|
unsigned long index = cc->index;
|
|
|
- int nr_migrated = 0;
|
|
|
int ret = 0;
|
|
|
|
|
|
while (1) {
|
|
@@ -1617,13 +1617,12 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
|
|
|
record_obj(handle, free_obj);
|
|
|
unpin_tag(handle);
|
|
|
obj_free(pool, class, used_obj);
|
|
|
- nr_migrated++;
|
|
|
+ cc->nr_migrated++;
|
|
|
}
|
|
|
|
|
|
/* Remember last position in this iteration */
|
|
|
cc->s_page = s_page;
|
|
|
cc->index = index;
|
|
|
- cc->nr_migrated = nr_migrated;
|
|
|
|
|
|
return ret;
|
|
|
}
|
|
@@ -1699,14 +1698,13 @@ static unsigned long zs_can_compact(struct size_class *class)
|
|
|
return obj_wasted * get_pages_per_zspage(class->size);
|
|
|
}
|
|
|
|
|
|
-static unsigned long __zs_compact(struct zs_pool *pool,
|
|
|
- struct size_class *class)
|
|
|
+static void __zs_compact(struct zs_pool *pool, struct size_class *class)
|
|
|
{
|
|
|
struct zs_compact_control cc;
|
|
|
struct page *src_page;
|
|
|
struct page *dst_page = NULL;
|
|
|
- unsigned long nr_total_migrated = 0;
|
|
|
|
|
|
+ cc.nr_migrated = 0;
|
|
|
spin_lock(&class->lock);
|
|
|
while ((src_page = isolate_source_page(class))) {
|
|
|
|
|
@@ -1728,7 +1726,6 @@ static unsigned long __zs_compact(struct zs_pool *pool,
|
|
|
break;
|
|
|
|
|
|
putback_zspage(pool, class, dst_page);
|
|
|
- nr_total_migrated += cc.nr_migrated;
|
|
|
}
|
|
|
|
|
|
/* Stop if we couldn't find slot */
|
|
@@ -1738,7 +1735,6 @@ static unsigned long __zs_compact(struct zs_pool *pool,
|
|
|
putback_zspage(pool, class, dst_page);
|
|
|
putback_zspage(pool, class, src_page);
|
|
|
spin_unlock(&class->lock);
|
|
|
- nr_total_migrated += cc.nr_migrated;
|
|
|
cond_resched();
|
|
|
spin_lock(&class->lock);
|
|
|
}
|
|
@@ -1746,15 +1742,14 @@ static unsigned long __zs_compact(struct zs_pool *pool,
|
|
|
if (src_page)
|
|
|
putback_zspage(pool, class, src_page);
|
|
|
|
|
|
- spin_unlock(&class->lock);
|
|
|
+ pool->stats.num_migrated += cc.nr_migrated;
|
|
|
|
|
|
- return nr_total_migrated;
|
|
|
+ spin_unlock(&class->lock);
|
|
|
}
|
|
|
|
|
|
unsigned long zs_compact(struct zs_pool *pool)
|
|
|
{
|
|
|
int i;
|
|
|
- unsigned long nr_migrated = 0;
|
|
|
struct size_class *class;
|
|
|
|
|
|
for (i = zs_size_classes - 1; i >= 0; i--) {
|
|
@@ -1763,13 +1758,19 @@ unsigned long zs_compact(struct zs_pool *pool)
|
|
|
continue;
|
|
|
if (class->index != i)
|
|
|
continue;
|
|
|
- nr_migrated += __zs_compact(pool, class);
|
|
|
+ __zs_compact(pool, class);
|
|
|
}
|
|
|
|
|
|
- return nr_migrated;
|
|
|
+ return pool->stats.num_migrated;
|
|
|
}
|
|
|
EXPORT_SYMBOL_GPL(zs_compact);
|
|
|
|
|
|
+void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats)
|
|
|
+{
|
|
|
+ memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats));
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(zs_pool_stats);
|
|
|
+
|
|
|
/**
|
|
|
* zs_create_pool - Creates an allocation pool to work from.
|
|
|
* @flags: allocation flags used to allocate pool metadata
|