@@ -620,24 +620,27 @@ lookup:
 		bud = FIRST;
 	}
 
-	spin_lock(&pool->stale_lock);
-	zhdr = list_first_entry_or_null(&pool->stale,
-					struct z3fold_header, buddy);
-	/*
-	 * Before allocating a page, let's see if we can take one from the
-	 * stale pages list. cancel_work_sync() can sleep so we must make
-	 * sure it won't be called in case we're in atomic context.
-	 */
-	if (zhdr && (can_sleep || !work_pending(&zhdr->work))) {
-		list_del(&zhdr->buddy);
-		spin_unlock(&pool->stale_lock);
-		if (can_sleep)
+	page = NULL;
+	if (can_sleep) {
+		spin_lock(&pool->stale_lock);
+		zhdr = list_first_entry_or_null(&pool->stale,
+						struct z3fold_header, buddy);
+		/*
+		 * Before allocating a page, let's see if we can take one from
+		 * the stale pages list. cancel_work_sync() can sleep so we
+		 * limit this case to the contexts where we can sleep
+		 */
+		if (zhdr) {
+			list_del(&zhdr->buddy);
+			spin_unlock(&pool->stale_lock);
 			cancel_work_sync(&zhdr->work);
-		page = virt_to_page(zhdr);
-	} else {
-		spin_unlock(&pool->stale_lock);
-		page = alloc_page(gfp);
+			page = virt_to_page(zhdr);
+		} else {
+			spin_unlock(&pool->stale_lock);
+		}
+	}
+	if (!page)
+		page = alloc_page(gfp);
 
 	if (!page)
 		return -ENOMEM;
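
The new control flow is easier to read than the old work_pending() check: a stale
page is reused only when the caller may sleep, because cancel_work_sync() can
block; in every other case the code falls through to a fresh alloc_page(gfp).
As a rough userspace sketch of that pattern (not the kernel API: struct page,
stale_list, cleanup_sync() and get_page() here are illustrative stand-ins, with
a pthread mutex playing the role of the spinlock):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Illustrative stand-ins for the objects in the patch. */
	struct page { struct page *next; };

	static pthread_mutex_t stale_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct page *stale_list;	/* analogue of pool->stale */

	/* Analogue of cancel_work_sync(): may block, so it is only
	 * legal when the caller is allowed to sleep. */
	static void cleanup_sync(struct page *p) { (void)p; }

	static struct page *get_page(bool can_sleep)
	{
		struct page *page = NULL;

		/* Touch the stale list only when we may sleep, mirroring
		 * the patch: cleanup_sync() below can block. */
		if (can_sleep) {
			pthread_mutex_lock(&stale_lock);
			page = stale_list;
			if (page) {
				stale_list = page->next;
				pthread_mutex_unlock(&stale_lock);
				cleanup_sync(page);	/* safe: may block */
			} else {
				pthread_mutex_unlock(&stale_lock);
			}
		}
		/* Fall back to a fresh allocation in every other case. */
		if (!page)
			page = calloc(1, sizeof(*page));
		return page;
	}

	int main(void)
	{
		struct page *p = get_page(true);
		printf("got page %p\n", (void *)p);
		free(p);
		return 0;
	}

The design point carried over from the patch: the sleep/no-sleep decision is
made once, up front, instead of being threaded through the reuse path, so the
blocking call sits only on the branch where blocking is known to be allowed.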