@@ -250,6 +250,7 @@ static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
 
 	WARN_ON(!list_empty(&zhdr->buddy));
 	set_bit(PAGE_STALE, &page->private);
+	clear_bit(NEEDS_COMPACTING, &page->private);
 	spin_lock(&pool->lock);
 	if (!list_empty(&page->lru))
 		list_del(&page->lru);
@@ -303,7 +304,6 @@ static void free_pages_work(struct work_struct *w)
 		list_del(&zhdr->buddy);
 		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
 			continue;
-		clear_bit(NEEDS_COMPACTING, &page->private);
 		spin_unlock(&pool->stale_lock);
 		cancel_work_sync(&zhdr->work);
 		free_z3fold_page(page);
@@ -624,10 +624,8 @@ lookup:
 	 * stale pages list. cancel_work_sync() can sleep so we must make
 	 * sure it won't be called in case we're in atomic context.
 	 */
-	if (zhdr && (can_sleep || !work_pending(&zhdr->work) ||
-		     !unlikely(work_busy(&zhdr->work)))) {
+	if (zhdr && (can_sleep || !work_pending(&zhdr->work))) {
 		list_del(&zhdr->buddy);
-		clear_bit(NEEDS_COMPACTING, &page->private);
 		spin_unlock(&pool->stale_lock);
 		if (can_sleep)
 			cancel_work_sync(&zhdr->work);
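
Below is a minimal, illustrative sketch (not part of the patch) of the pattern the "lookup:" hunk relies on: cancel_work_sync() may sleep, so a caller in atomic context takes a stale entry only when no work is pending, while a sleepable caller may cancel the work synchronously. The names demo_page, demo_stale_lock and demo_grab_stale are hypothetical.

/*
 * Illustrative sketch only -- not part of the patch.  A stale entry is
 * reused either when the caller can sleep (and may therefore wait in
 * cancel_work_sync()) or when no per-page work is pending at all.
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_page {
	struct list_head buddy;		/* link on the stale list */
	struct work_struct work;	/* deferred per-page work */
};

static DEFINE_SPINLOCK(demo_stale_lock);

static struct demo_page *demo_grab_stale(struct demo_page *p, bool can_sleep)
{
	spin_lock(&demo_stale_lock);
	if (p && (can_sleep || !work_pending(&p->work))) {
		list_del(&p->buddy);
		spin_unlock(&demo_stale_lock);
		if (can_sleep)
			cancel_work_sync(&p->work);	/* may sleep */
		return p;
	}
	spin_unlock(&demo_stale_lock);
	return NULL;
}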