@@ -1776,7 +1776,7 @@ struct zs_compact_control {
 	struct page *d_page;
 	 /* Starting object index within @s_page which used for live object
 	  * in the subpage. */
-	int index;
+	int obj_idx;
 };
 
 static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
@@ -1786,16 +1786,16 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
 	unsigned long handle;
 	struct page *s_page = cc->s_page;
 	struct page *d_page = cc->d_page;
-	unsigned long index = cc->index;
+	int obj_idx = cc->obj_idx;
 	int ret = 0;
 
 	while (1) {
-		handle = find_alloced_obj(class, s_page, index);
+		handle = find_alloced_obj(class, s_page, obj_idx);
 		if (!handle) {
 			s_page = get_next_page(s_page);
 			if (!s_page)
 				break;
-			index = 0;
+			obj_idx = 0;
 			continue;
 		}
 
@@ -1809,7 +1809,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
 		used_obj = handle_to_obj(handle);
 		free_obj = obj_malloc(class, get_zspage(d_page), handle);
 		zs_object_copy(class, free_obj, used_obj);
-		index++;
+		obj_idx++;
 		/*
 		 * record_obj updates handle's value to free_obj and it will
 		 * invalidate lock bit(ie, HANDLE_PIN_BIT) of handle, which
@@ -1824,7 +1824,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
 
 	/* Remember last position in this iteration */
 	cc->s_page = s_page;
-	cc->index = index;
+	cc->obj_idx = obj_idx;
 
 	return ret;
 }
@@ -2279,7 +2279,7 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
 		if (!zs_can_compact(class))
 			break;
 
-		cc.index = 0;
+		cc.obj_idx = 0;
 		cc.s_page = get_first_page(src_zspage);
 
 		while ((dst_zspage = isolate_zspage(class, false))) {
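
For context (not part of the patch itself): the renamed field is a scan cursor. migrate_zspage() starts looking for live objects at cc->obj_idx within cc->s_page and, before returning, records the page and index where it stopped so the next call can resume from the same spot. The user-space sketch below only models that resume-from-cursor pattern; the toy_page, toy_cursor and scan_from names and the fixed OBJS_PER_PAGE value are invented for illustration and are not zsmalloc APIs.

/*
 * Illustrative sketch only (not kernel code): a standalone model of the
 * cursor that zs_compact_control carries between calls. The struct keeps
 * the last scanned "page" and the object index within it (obj_idx), so a
 * later pass can pick up where the previous one stopped.
 */
#include <stdio.h>

#define OBJS_PER_PAGE 4

struct toy_page {
	struct toy_page *next;
	int alloced[OBJS_PER_PAGE];	/* 1 = live object at this index */
};

struct toy_cursor {
	struct toy_page *s_page;	/* page to resume scanning in */
	int obj_idx;			/* first index to look at in s_page */
};

/* Walk pages from the cursor, report live objects, then update the cursor. */
static void scan_from(struct toy_cursor *cc)
{
	struct toy_page *s_page = cc->s_page;
	int obj_idx = cc->obj_idx;

	while (s_page) {
		if (obj_idx == OBJS_PER_PAGE) {
			/* analogous to moving on via get_next_page() */
			s_page = s_page->next;
			obj_idx = 0;
			continue;
		}
		if (s_page->alloced[obj_idx])
			printf("live object at page %p idx %d\n",
			       (void *)s_page, obj_idx);
		obj_idx++;
	}

	/* Remember last position, as migrate_zspage() does with cc->obj_idx. */
	cc->s_page = s_page;
	cc->obj_idx = obj_idx;
}

int main(void)
{
	struct toy_page p2 = { .next = NULL, .alloced = { 0, 1, 0, 0 } };
	struct toy_page p1 = { .next = &p2,  .alloced = { 1, 0, 1, 0 } };
	struct toy_cursor cc = { .s_page = &p1, .obj_idx = 0 };

	scan_from(&cc);
	return 0;
}

In the real code the per-page search is done by find_alloced_obj() and the page chain is followed with get_next_page(), as visible in the hunks above; the sketch collapses both into one flat loop.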