@@ -1832,9 +1832,9 @@ static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
  * Note that start_page and end_pages are not aligned on a pageblock
  * boundary. If alignment is required, use move_freepages_block()
  */
-int move_freepages(struct zone *zone,
+static int move_freepages(struct zone *zone,
 			  struct page *start_page, struct page *end_page,
-			  int migratetype)
+			  int migratetype, int *num_movable)
 {
 	struct page *page;
 	unsigned int order;
@@ -1851,6 +1851,9 @@ int move_freepages(struct zone *zone,
 	VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
 #endif
 
+	if (num_movable)
+		*num_movable = 0;
+
 	for (page = start_page; page <= end_page;) {
 		if (!pfn_valid_within(page_to_pfn(page))) {
 			page++;
@@ -1861,6 +1864,15 @@ int move_freepages(struct zone *zone,
 		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
 
 		if (!PageBuddy(page)) {
+			/*
+			 * We assume that pages that could be isolated for
+			 * migration are movable. But we don't actually try
+			 * isolating, as that would be expensive.
+			 */
+			if (num_movable &&
+					(PageLRU(page) || __PageMovable(page)))
+				(*num_movable)++;
+
 			page++;
 			continue;
 		}
@@ -1876,7 +1888,7 @@ int move_freepages(struct zone *zone,
 }
 
 int move_freepages_block(struct zone *zone, struct page *page,
-				int migratetype)
+				int migratetype, int *num_movable)
 {
 	unsigned long start_pfn, end_pfn;
 	struct page *start_page, *end_page;
@@ -1893,7 +1905,8 @@ int move_freepages_block(struct zone *zone, struct page *page,
 	if (!zone_spans_pfn(zone, end_pfn))
 		return 0;
 
-	return move_freepages(zone, start_page, end_page, migratetype);
+	return move_freepages(zone, start_page, end_page, migratetype,
+								num_movable);
 }
 
 static void change_pageblock_range(struct page *pageblock_page,
@@ -1943,22 +1956,26 @@ static bool can_steal_fallback(unsigned int order, int start_mt)
 /*
  * This function implements actual steal behaviour. If order is large enough,
  * we can steal whole pageblock. If not, we first move freepages in this
- * pageblock and check whether half of pages are moved or not. If half of
- * pages are moved, we can change migratetype of pageblock and permanently
- * use it's pages as requested migratetype in the future.
+ * pageblock to our migratetype and determine how many already-allocated pages
+ * are there in the pageblock with a compatible migratetype. If at least half
+ * of pages are free or compatible, we can change migratetype of the pageblock
+ * itself, so pages freed in the future will be put on the correct free list.
  */
 static void steal_suitable_fallback(struct zone *zone, struct page *page,
 					int start_type, bool whole_block)
 {
 	unsigned int current_order = page_order(page);
 	struct free_area *area;
-	int pages;
+	int free_pages, movable_pages, alike_pages;
+	int old_block_type;
+
+	old_block_type = get_pageblock_migratetype(page);
 
 	/*
 	 * This can happen due to races and we want to prevent broken
 	 * highatomic accounting.
 	 */
-	if (is_migrate_highatomic_page(page))
+	if (is_migrate_highatomic(old_block_type))
 		goto single_page;
 
 	/* Take ownership for orders >= pageblock_order */
@@ -1971,13 +1988,39 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page,
 	if (!whole_block)
 		goto single_page;
 
-	pages = move_freepages_block(zone, page, start_type);
+	free_pages = move_freepages_block(zone, page, start_type,
+						&movable_pages);
+	/*
+	 * Determine how many pages are compatible with our allocation.
+	 * For movable allocation, it's the number of movable pages which
+	 * we just obtained. For other types it's a bit more tricky.
+	 */
+	if (start_type == MIGRATE_MOVABLE) {
+		alike_pages = movable_pages;
+	} else {
+		/*
+		 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
+		 * to MOVABLE pageblock, consider all non-movable pages as
+		 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
+		 * vice versa, be conservative since we can't distinguish the
+		 * exact migratetype of non-movable pages.
+		 */
+		if (old_block_type == MIGRATE_MOVABLE)
+			alike_pages = pageblock_nr_pages
+						- (free_pages + movable_pages);
+		else
+			alike_pages = 0;
+	}
+
 	/* moving whole block can fail due to zone boundary conditions */
-	if (!pages)
+	if (!free_pages)
 		goto single_page;
 
-	/* Claim the whole block if over half of it is free */
-	if (pages >= (1 << (pageblock_order-1)) ||
+	/*
+	 * If a sufficient number of pages in the block are either free or of
+	 * comparable migratability as our allocation, claim the whole block.
+	 */
+	if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
 			page_group_by_mobility_disabled)
 		set_pageblock_migratetype(page, start_type);
 
@@ -2055,7 +2098,7 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
 	    && !is_migrate_cma(mt)) {
 		zone->nr_reserved_highatomic += pageblock_nr_pages;
 		set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
-		move_freepages_block(zone, page, MIGRATE_HIGHATOMIC);
+		move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
 	}
 
 out_unlock:
@@ -2132,7 +2175,8 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 		 * may increase.
 		 */
 		set_pageblock_migratetype(page, ac->migratetype);
-		ret = move_freepages_block(zone, page, ac->migratetype);
+		ret = move_freepages_block(zone, page, ac->migratetype,
+									NULL);
 		if (ret) {
 			spin_unlock_irqrestore(&zone->lock, flags);
 			return ret;
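
For reference, here is a minimal user-space sketch (not part of the patch) of
the "claim the whole pageblock" accounting that steal_suitable_fallback()
performs after this change. The migratetype values and PAGEBLOCK_ORDER below
are illustrative assumptions, not the kernel's definitions.

/*
 * Standalone sketch of the alike_pages accounting; compiles with any C
 * compiler, none of this is kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

enum migratetype { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RECLAIMABLE };

#define PAGEBLOCK_ORDER		9			/* assumed: 512 pages per block */
#define PAGEBLOCK_NR_PAGES	(1 << PAGEBLOCK_ORDER)

/*
 * Mirrors the decision above: for a MOVABLE allocation only the movable pages
 * we counted are "alike"; for an unmovable/reclaimable allocation stealing
 * from a MOVABLE block, everything that is neither free nor movable is assumed
 * compatible; otherwise be conservative and count nothing.
 */
static int count_alike_pages(int start_type, int old_block_type,
			     int free_pages, int movable_pages)
{
	if (start_type == MIGRATE_MOVABLE)
		return movable_pages;
	if (old_block_type == MIGRATE_MOVABLE)
		return PAGEBLOCK_NR_PAGES - (free_pages + movable_pages);
	return 0;
}

/* Claim the block when at least half of it is free or compatible. */
static bool should_claim_block(int free_pages, int alike_pages)
{
	return free_pages + alike_pages >= (1 << (PAGEBLOCK_ORDER - 1));
}

int main(void)
{
	/* Example: an UNMOVABLE allocation falling back to a MOVABLE block. */
	int free_pages = 200, movable_pages = 250;
	int alike = count_alike_pages(MIGRATE_UNMOVABLE, MIGRATE_MOVABLE,
				      free_pages, movable_pages);

	printf("alike_pages = %d, claim whole block: %s\n", alike,
	       should_claim_block(free_pages, alike) ? "yes" : "no");
	return 0;
}

With these example numbers, 62 already-allocated pages count as compatible and
200 + 62 >= 256, so the whole block would be claimed.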