@@ -41,15 +41,17 @@ static inline void count_compact_events(enum vm_event_item item, long delta)
 
 static unsigned long release_freepages(struct list_head *freelist)
 {
 	struct page *page, *next;
-	unsigned long count = 0;
+	unsigned long high_pfn = 0;
 
 	list_for_each_entry_safe(page, next, freelist, lru) {
+		unsigned long pfn = page_to_pfn(page);
 		list_del(&page->lru);
 		__free_page(page);
-		count++;
+		if (pfn > high_pfn)
+			high_pfn = pfn;
 	}
 
-	return count;
+	return high_pfn;
 }
 
 static void map_pages(struct list_head *list)
@@ -195,16 +197,12 @@ static void update_pageblock_skip(struct compact_control *cc,
 
 	/* Update where async and sync compaction should restart */
 	if (migrate_scanner) {
-		if (cc->finished_update_migrate)
-			return;
 		if (pfn > zone->compact_cached_migrate_pfn[0])
 			zone->compact_cached_migrate_pfn[0] = pfn;
 		if (cc->mode != MIGRATE_ASYNC &&
 		    pfn > zone->compact_cached_migrate_pfn[1])
 			zone->compact_cached_migrate_pfn[1] = pfn;
 	} else {
-		if (cc->finished_update_free)
-			return;
 		if (pfn < zone->compact_cached_free_pfn)
 			zone->compact_cached_free_pfn = pfn;
 	}
@@ -715,7 +713,6 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		del_page_from_lru_list(page, lruvec, page_lru(page));
 
 isolate_success:
-		cc->finished_update_migrate = true;
 		list_add(&page->lru, migratelist);
 		cc->nr_migratepages++;
 		nr_isolated++;
@@ -888,15 +885,6 @@ static void isolate_freepages(struct compact_control *cc)
 					isolate_start_pfn :
 					block_start_pfn - pageblock_nr_pages;
 
-		/*
-		 * Set a flag that we successfully isolated in this pageblock.
-		 * In the next loop iteration, zone->compact_cached_free_pfn
-		 * will not be updated and thus it will effectively contain the
-		 * highest pageblock we isolated pages from.
-		 */
-		if (isolated)
-			cc->finished_update_free = true;
-
 		/*
 		 * isolate_freepages_block() might have aborted due to async
 		 * compaction being contended
@@ -1251,9 +1239,24 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 	}
 
 out:
-	/* Release free pages and check accounting */
-	cc->nr_freepages -= release_freepages(&cc->freepages);
-	VM_BUG_ON(cc->nr_freepages != 0);
+	/*
+	 * Release free pages and update where the free scanner should restart,
+	 * so we don't leave any returned pages behind in the next attempt.
+	 */
+	if (cc->nr_freepages > 0) {
+		unsigned long free_pfn = release_freepages(&cc->freepages);
+
+		cc->nr_freepages = 0;
+		VM_BUG_ON(free_pfn == 0);
+		/* The cached pfn is always the first in a pageblock */
+		free_pfn &= ~(pageblock_nr_pages-1);
+		/*
+		 * Only go back, not forward. The cached pfn might have been
+		 * already reset to zone end in compact_finished()
+		 */
+		if (free_pfn > zone->compact_cached_free_pfn)
+			zone->compact_cached_free_pfn = free_pfn;
+	}
 
 	trace_mm_compaction_end(ret);
 