@@ -404,8 +404,7 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked)
 		WARN_ON(z3fold_page_trylock(zhdr));
 	else
 		z3fold_page_lock(zhdr);
-	if (test_bit(PAGE_STALE, &page->private) ||
-	    !test_and_clear_bit(NEEDS_COMPACTING, &page->private)) {
+	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
 		z3fold_page_unlock(zhdr);
 		return;
 	}
@@ -413,6 +412,11 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked)
 	list_del_init(&zhdr->buddy);
 	spin_unlock(&pool->lock);
 
+	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
+		atomic64_dec(&pool->pages_nr);
+		return;
+	}
+
 	z3fold_compact_page(zhdr);
 	unbuddied = get_cpu_ptr(pool->unbuddied);
 	fchunks = num_free_chunks(zhdr);
@@ -753,9 +757,11 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 		list_del_init(&zhdr->buddy);
 		spin_unlock(&pool->lock);
 		zhdr->cpu = -1;
+		kref_get(&zhdr->refcount);
 		do_compact_page(zhdr, true);
 		return;
 	}
+	kref_get(&zhdr->refcount);
 	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
 	z3fold_page_unlock(zhdr);
 }
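The pattern here is the usual kref lifetime rule: z3fold_free() takes a reference with kref_get() before handing the page to the compaction path (whether directly via do_compact_page() or deferred via queue_work_on()), and do_compact_page() drops it with kref_put(); if that put released the last reference, release_z3fold_page_locked() has already freed the page, so the function must return without touching it. As a rough illustration only, here is a minimal userspace sketch of that get/put discipline using C11 atomics; struct obj, obj_get() and obj_put() are hypothetical stand-ins for the kernel's struct kref, kref_get() and kref_put(), and the memory ordering is simplified (the real kref builds on refcount_t, with saturation and release/acquire semantics).

```c
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;
};

static void obj_get(struct obj *o)
{
	atomic_fetch_add(&o->refcount, 1);
}

/*
 * Drop one reference. Returns 1 and invokes release() if this put
 * dropped the count to zero -- after which the caller must not touch
 * the object, exactly as after a kref_put() that returned nonzero.
 */
static int obj_put(struct obj *o, void (*release)(struct obj *))
{
	/* atomic_fetch_sub() returns the value before the decrement. */
	if (atomic_fetch_sub(&o->refcount, 1) == 1) {
		release(o);
		return 1;
	}
	return 0;
}

static void obj_release(struct obj *o)
{
	printf("last reference dropped, freeing\n");
	free(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->refcount, 1);	/* initial owner's reference */
	obj_get(o);			/* e.g. before queue_work_on() */
	obj_put(o, obj_release);	/* worker finishes: 2 -> 1 */
	obj_put(o, obj_release);	/* owner finishes: 1 -> 0, freed */
	return 0;
}
```

The point of taking the reference *before* queueing is that the free path and the compaction worker can then race harmlessly: whichever side performs the final put runs the release function, so the page can no longer be freed out from under do_compact_page().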