@@ -1471,7 +1471,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
 }
 EXPORT_SYMBOL_GPL(zs_free);
 
-static void zs_object_copy(unsigned long src, unsigned long dst,
+static void zs_object_copy(unsigned long dst, unsigned long src,
 				struct size_class *class)
 {
 	struct page *s_page, *d_page;
@@ -1612,7 +1612,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
 
 		used_obj = handle_to_obj(handle);
 		free_obj = obj_malloc(d_page, class, handle);
-		zs_object_copy(used_obj, free_obj, class);
+		zs_object_copy(free_obj, used_obj, class);
 		index++;
 		record_obj(handle, free_obj);
 		unpin_tag(handle);
@@ -1628,7 +1628,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
 	return ret;
 }
 
-static struct page *alloc_target_page(struct size_class *class)
+static struct page *isolate_target_page(struct size_class *class)
 {
 	int i;
 	struct page *page;
@@ -1718,11 +1718,11 @@ static unsigned long __zs_compact(struct zs_pool *pool,
 	cc.index = 0;
 	cc.s_page = src_page;
 
-	while ((dst_page = alloc_target_page(class))) {
+	while ((dst_page = isolate_target_page(class))) {
 		cc.d_page = dst_page;
 		/*
-		 * If there is no more space in dst_page, try to
-		 * allocate another zspage.
+		 * If there is no more space in dst_page, resched
+		 * and see if anyone had allocated another zspage.
 		 */
 		if (!migrate_zspage(pool, class, &cc))
 			break;