@@ -1349,7 +1349,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
 }
 
 /*
- * zone->lru_lock is heavily contended. Some of the functions that
+ * zone_lru_lock is heavily contended. Some of the functions that
  * shrink the lists perform better by taking out a batch of pages
  * and working on them outside the LRU lock.
  *
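
The hunks that follow are a mechanical substitution of &zone->lru_lock with
zone_lru_lock(zone). The helper itself is not defined anywhere in this
section; below is a minimal sketch of what such a wrapper presumably looks
like, assuming the lock has moved from struct zone to the zone's node (the
zone_pgdat and lru_lock field names are assumptions, not taken from these
hunks).

	/*
	 * Sketch only: a static inline accessor that call sites can use in
	 * place of &zone->lru_lock without otherwise changing, which is what
	 * keeps every hunk below a one-line change per call site.
	 */
	static inline spinlock_t *zone_lru_lock(struct zone *zone)
	{
		return &zone->zone_pgdat->lru_lock;
	}
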
@@ -1444,7 +1444,7 @@ int isolate_lru_page(struct page *page)
 		struct zone *zone = page_zone(page);
 		struct lruvec *lruvec;
 
-		spin_lock_irq(&zone->lru_lock);
+		spin_lock_irq(zone_lru_lock(zone));
 		lruvec = mem_cgroup_page_lruvec(page, zone);
 		if (PageLRU(page)) {
 			int lru = page_lru(page);
@@ -1453,7 +1453,7 @@ int isolate_lru_page(struct page *page)
 			del_page_from_lru_list(page, lruvec, lru);
 			ret = 0;
 		}
-		spin_unlock_irq(&zone->lru_lock);
+		spin_unlock_irq(zone_lru_lock(zone));
 	}
 	return ret;
 }
@@ -1512,9 +1512,9 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 		VM_BUG_ON_PAGE(PageLRU(page), page);
 		list_del(&page->lru);
 		if (unlikely(!page_evictable(page))) {
-			spin_unlock_irq(&zone->lru_lock);
+			spin_unlock_irq(zone_lru_lock(zone));
 			putback_lru_page(page);
-			spin_lock_irq(&zone->lru_lock);
+			spin_lock_irq(zone_lru_lock(zone));
 			continue;
 		}
 
@@ -1535,10 +1535,10 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 			del_page_from_lru_list(page, lruvec, lru);
 
 			if (unlikely(PageCompound(page))) {
-				spin_unlock_irq(&zone->lru_lock);
+				spin_unlock_irq(zone_lru_lock(zone));
 				mem_cgroup_uncharge(page);
 				(*get_compound_page_dtor(page))(page);
-				spin_lock_irq(&zone->lru_lock);
+				spin_lock_irq(zone_lru_lock(zone));
 			} else
 				list_add(&page->lru, &pages_to_free);
 		}
@@ -1600,7 +1600,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	if (!sc->may_writepage)
 		isolate_mode |= ISOLATE_CLEAN;
 
-	spin_lock_irq(&zone->lru_lock);
+	spin_lock_irq(zone_lru_lock(zone));
 
 	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
				     &nr_scanned, sc, isolate_mode, lru);
@@ -1616,7 +1616,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 		else
 			__count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned);
 	}
-	spin_unlock_irq(&zone->lru_lock);
+	spin_unlock_irq(zone_lru_lock(zone));
 
 	if (nr_taken == 0)
 		return 0;
@@ -1626,7 +1626,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 				&nr_writeback, &nr_immediate,
 				false);
 
-	spin_lock_irq(&zone->lru_lock);
+	spin_lock_irq(zone_lru_lock(zone));
 
 	if (global_reclaim(sc)) {
 		if (current_is_kswapd())
@@ -1641,7 +1641,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 
 	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
 
-	spin_unlock_irq(&zone->lru_lock);
+	spin_unlock_irq(zone_lru_lock(zone));
 
 	mem_cgroup_uncharge_list(&page_list);
 	free_hot_cold_page_list(&page_list, true);
@@ -1715,9 +1715,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
  * processes, from rmap.
  *
  * If the pages are mostly unmapped, the processing is fast and it is
- * appropriate to hold zone->lru_lock across the whole operation. But if
+ * appropriate to hold zone_lru_lock across the whole operation. But if
  * the pages are mapped, the processing is slow (page_referenced()) so we
- * should drop zone->lru_lock around each page. It's impossible to balance
+ * should drop zone_lru_lock around each page. It's impossible to balance
  * this, so instead we remove the pages from the LRU while processing them.
  * It is safe to rely on PG_active against the non-LRU pages in here because
  * nobody will play with that bit on a non-LRU page.
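
The comment above is the rationale for the shape of the shrink_active_list()
hunks further down: the LRU lock is taken once to pull a batch of pages onto
a private list, dropped while the slow per-page work runs, then retaken to
put the survivors back. A condensed sketch of that pattern, pieced together
from the hunks in this patch (not the literal function body; counters,
reference checks and the active/inactive split are omitted):

	LIST_HEAD(l_hold);

	spin_lock_irq(zone_lru_lock(zone));
	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
				     &nr_scanned, sc, isolate_mode, lru);
	spin_unlock_irq(zone_lru_lock(zone));

	while (!list_empty(&l_hold)) {
		struct page *page = lru_to_page(&l_hold);

		cond_resched();
		list_del(&page->lru);
		/*
		 * Slow work such as page_referenced() runs here with no LRU
		 * lock held; the real code sorts the page onto l_active or
		 * l_inactive for the putback phase below.
		 */
	}

	spin_lock_irq(zone_lru_lock(zone));
	/* move_active_pages_to_lru() returns the batch under the lock */
	spin_unlock_irq(zone_lru_lock(zone));
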
@@ -1754,10 +1754,10 @@ static void move_active_pages_to_lru(struct lruvec *lruvec,
 			del_page_from_lru_list(page, lruvec, lru);
 
 			if (unlikely(PageCompound(page))) {
-				spin_unlock_irq(&zone->lru_lock);
+				spin_unlock_irq(zone_lru_lock(zone));
 				mem_cgroup_uncharge(page);
 				(*get_compound_page_dtor(page))(page);
-				spin_lock_irq(&zone->lru_lock);
+				spin_lock_irq(zone_lru_lock(zone));
 			} else
 				list_add(&page->lru, pages_to_free);
 		}
@@ -1792,7 +1792,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	if (!sc->may_writepage)
 		isolate_mode |= ISOLATE_CLEAN;
 
-	spin_lock_irq(&zone->lru_lock);
+	spin_lock_irq(zone_lru_lock(zone));
 
 	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
				     &nr_scanned, sc, isolate_mode, lru);
@@ -1805,7 +1805,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	__mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned);
 	__count_zone_vm_events(PGREFILL, zone, nr_scanned);
 
-	spin_unlock_irq(&zone->lru_lock);
+	spin_unlock_irq(zone_lru_lock(zone));
 
 	while (!list_empty(&l_hold)) {
 		cond_resched();
@@ -1850,7 +1850,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	/*
 	 * Move pages back to the lru list.
 	 */
-	spin_lock_irq(&zone->lru_lock);
+	spin_lock_irq(zone_lru_lock(zone));
 	/*
 	 * Count referenced pages from currently used mappings as rotated,
 	 * even though only some of them are actually re-activated. This
|
|
move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
|
|
move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
|
|
move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
|
|
move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
|
|
__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
|
|
__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
|
|
- spin_unlock_irq(&zone->lru_lock);
|
|
|
|
|
|
+ spin_unlock_irq(zone_lru_lock(zone));
|
|
|
|
|
|
mem_cgroup_uncharge_list(&l_hold);
|
|
mem_cgroup_uncharge_list(&l_hold);
|
|
free_hot_cold_page_list(&l_hold, true);
|
|
free_hot_cold_page_list(&l_hold, true);
|
|
@@ -2077,7 +2077,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
 	file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE) +
 		lruvec_lru_size(lruvec, LRU_INACTIVE_FILE);
 
-	spin_lock_irq(&zone->lru_lock);
+	spin_lock_irq(zone_lru_lock(zone));
 	if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
 		reclaim_stat->recent_scanned[0] /= 2;
 		reclaim_stat->recent_rotated[0] /= 2;
@@ -2098,7 +2098,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
 
 	fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
 	fp /= reclaim_stat->recent_rotated[1] + 1;
-	spin_unlock_irq(&zone->lru_lock);
+	spin_unlock_irq(zone_lru_lock(zone));
 
 	fraction[0] = ap;
 	fraction[1] = fp;
@@ -3791,9 +3791,9 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
 		pagezone = page_zone(page);
 		if (pagezone != zone) {
 			if (zone)
-				spin_unlock_irq(&zone->lru_lock);
+				spin_unlock_irq(zone_lru_lock(zone));
 			zone = pagezone;
-			spin_lock_irq(&zone->lru_lock);
+			spin_lock_irq(zone_lru_lock(zone));
 		}
 		lruvec = mem_cgroup_page_lruvec(page, zone);
 
@@ -3814,7 +3814,7 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
 	if (zone) {
 		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
 		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
-		spin_unlock_irq(&zone->lru_lock);
+		spin_unlock_irq(zone_lru_lock(zone));
 	}
 }
 #endif /* CONFIG_SHMEM */
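
The check_move_unevictable_pages() hunks above also show the other locking
pattern that recurs in this file: when walking pages that may belong to
different zones, the LRU lock is acquired lazily and handed over only when a
page falls in a different zone from the one currently locked. A condensed
sketch of that loop, reduced from the two hunks above (the per-page rescue
work and the event counters are omitted):

	struct zone *zone = NULL;
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct zone *pagezone = page_zone(pages[i]);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(zone_lru_lock(zone));
			zone = pagezone;
			spin_lock_irq(zone_lru_lock(zone));
		}
		/* per-page work runs under the current zone's LRU lock */
	}
	if (zone)
		spin_unlock_irq(zone_lru_lock(zone));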