@@ -298,10 +298,12 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 {
 	int i;
 	int nr = pagevec_count(pvec);
-	int delta_munlocked = -nr;
+	int delta_munlocked;
 	struct pagevec pvec_putback;
 	int pgrescued = 0;
 
+	pagevec_init(&pvec_putback, 0);
+
 	/* Phase 1: page isolation */
 	spin_lock_irq(&zone->lru_lock);
 	for (i = 0; i < nr; i++) {
@@ -330,18 +332,21 @@ skip_munlock:
 			/*
 			 * We won't be munlocking this page in the next phase
 			 * but we still need to release the follow_page_mask()
-			 * pin.
+			 * pin. We cannot do it under lru_lock however. If it's
+			 * the last pin, __page_cache_release would deadlock.
 			 */
+			pagevec_add(&pvec_putback, pvec->pages[i]);
 			pvec->pages[i] = NULL;
-			put_page(page);
-			delta_munlocked++;
 		}
 	}
+	delta_munlocked = -nr + pagevec_count(&pvec_putback);
 	__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
 	spin_unlock_irq(&zone->lru_lock);
 
+	/* Now we can release pins of pages that we are not munlocking */
+	pagevec_release(&pvec_putback);
+
 	/* Phase 2: page munlock */
-	pagevec_init(&pvec_putback, 0);
 	for (i = 0; i < nr; i++) {
 		struct page *page = pvec->pages[i];
 
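The pattern behind the fix is easy to reproduce outside the kernel. Below is a minimal userspace C sketch (an illustration only; lru_lock, release_object() and process_batch() are invented names, not kernel APIs) of the same deferred-putback idea: the final release of an object needs the lock the walker already holds, so the walker batches those objects locally and releases them only after dropping the lock.

/*
 * Userspace illustration of the deferred-putback pattern (not kernel
 * code): releasing an object may need to take the very lock the caller
 * already holds, so the caller only remembers such objects while locked
 * and drops the references after unlocking.
 */
#include <pthread.h>
#include <stdio.h>

#define BATCH 14			/* analogous to a pagevec's capacity */

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

struct object {
	int refcount;
	int isolated;
};

/* Final release takes lru_lock, like __page_cache_release() does. */
static void release_object(struct object *obj)
{
	pthread_mutex_lock(&lru_lock);
	printf("released object, refcount was %d\n", obj->refcount);
	pthread_mutex_unlock(&lru_lock);
}

static void process_batch(struct object **objs, int nr)
{
	struct object *putback[BATCH];
	int nr_putback = 0;
	int i;

	pthread_mutex_lock(&lru_lock);
	for (i = 0; i < nr; i++) {
		if (objs[i]->isolated)
			continue;	/* handled in a later phase */
		/*
		 * Calling release_object() here would self-deadlock on
		 * lru_lock, so only remember the object for later.
		 */
		putback[nr_putback++] = objs[i];
		objs[i] = NULL;
	}
	pthread_mutex_unlock(&lru_lock);

	/* Safe now: the lock is no longer held. */
	for (i = 0; i < nr_putback; i++)
		release_object(putback[i]);
}

int main(void)
{
	struct object a = { 1, 1 };	/* isolated, kept for phase 2 */
	struct object b = { 1, 0 };	/* not isolated, goes to putback */
	struct object *objs[] = { &a, &b };

	process_batch(objs, 2);
	return 0;
}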