@@ -467,11 +467,6 @@ static struct zpool_driver zs_zpool_driver = {
 MODULE_ALIAS("zpool-zsmalloc");
 #endif /* CONFIG_ZPOOL */
 
-static unsigned int get_maxobj_per_zspage(int size, int pages_per_zspage)
-{
-	return pages_per_zspage * PAGE_SIZE / size;
-}
-
 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
 static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
 
@@ -1359,16 +1354,14 @@ static void init_zs_size_classes(void)
 	zs_size_classes = nr;
 }
 
-static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
+static bool can_merge(struct size_class *prev, int pages_per_zspage,
+					int objs_per_zspage)
 {
-	if (prev->pages_per_zspage != pages_per_zspage)
-		return false;
+	if (prev->pages_per_zspage == pages_per_zspage &&
+		prev->objs_per_zspage == objs_per_zspage)
+		return true;
 
-	if (prev->objs_per_zspage
-		!= get_maxobj_per_zspage(size, pages_per_zspage))
-		return false;
-
-	return true;
+	return false;
 }
 
 static bool zspage_full(struct size_class *class, struct zspage *zspage)
@@ -2438,6 +2431,7 @@ struct zs_pool *zs_create_pool(const char *name)
 	for (i = zs_size_classes - 1; i >= 0; i--) {
 		int size;
 		int pages_per_zspage;
+		int objs_per_zspage;
 		struct size_class *class;
 		int fullness = 0;
 
@@ -2445,6 +2439,7 @@ struct zs_pool *zs_create_pool(const char *name)
 		if (size > ZS_MAX_ALLOC_SIZE)
 			size = ZS_MAX_ALLOC_SIZE;
 		pages_per_zspage = get_pages_per_zspage(size);
+		objs_per_zspage = pages_per_zspage * PAGE_SIZE / size;
 
 		/*
 		 * size_class is used for normal zsmalloc operation such
@@ -2456,7 +2451,7 @@ struct zs_pool *zs_create_pool(const char *name)
 		 * previous size_class if possible.
 		 */
 		if (prev_class) {
-			if (can_merge(prev_class, size, pages_per_zspage)) {
+			if (can_merge(prev_class, pages_per_zspage, objs_per_zspage)) {
 				pool->size_class[i] = prev_class;
 				continue;
 			}
@@ -2469,8 +2464,7 @@ struct zs_pool *zs_create_pool(const char *name)
 		class->size = size;
 		class->index = i;
 		class->pages_per_zspage = pages_per_zspage;
-		class->objs_per_zspage = get_maxobj_per_zspage(class->size,
-					class->pages_per_zspage);
+		class->objs_per_zspage = objs_per_zspage;
 		spin_lock_init(&class->lock);
 		pool->size_class[i] = class;
 		for (fullness = ZS_EMPTY; fullness < NR_ZS_FULLNESS;