@@ -2893,7 +2893,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
 		 * exists.
 		 */
 		watermark = min_wmark_pages(zone) + (1UL << order);
-		if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
+		if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
 			return 0;
 
 		__mod_zone_freepage_state(zone, -(1UL << order), mt);
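A note on the hunk above: __isolate_free_page() only proceeds when the zone would still sit above its min watermark after 2^order pages are removed, and passing 0 instead of ALLOC_CMA means free CMA pages are no longer discounted before that comparison. Below is a minimal userspace sketch of the arithmetic; struct zone_model and its fields are illustrative stand-ins, not kernel definitions.

/*
 * Hedged sketch, not kernel code: models the order-0 watermark test
 * that __isolate_free_page() performs with alloc_flags == 0. Names
 * and numbers are made up for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct zone_model {
	unsigned long min_wmark;	/* min watermark, in pages */
	unsigned long nr_free;		/* NR_FREE_PAGES, CMA included */
};

/* With alloc_flags == 0, no CMA discount: compare raw free pages. */
static bool watermark_ok(const struct zone_model *z, unsigned long mark)
{
	return z->nr_free > mark;
}

int main(void)
{
	struct zone_model z = { .min_wmark = 1024, .nr_free = 1600 };
	unsigned int order = 9;	/* a 2MB block with 4KB pages */

	/* Leave at least min_wmark pages behind after taking 2^order. */
	unsigned long watermark = z.min_wmark + (1UL << order);

	printf("isolation %s\n",
	       watermark_ok(&z, watermark) ? "allowed" : "refused");
	return 0;
}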
@@ -3169,12 +3169,6 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
 	}
 
 
-#ifdef CONFIG_CMA
-	/* If allocation can't use CMA areas don't use free CMA pages */
-	if (!(alloc_flags & ALLOC_CMA))
-		free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
-#endif
-
 	/*
 	 * Check watermarks for an order-0 allocation request. If these
 	 * are not met, then a high-order request also cannot go ahead
@@ -3201,10 +3195,8 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
 		}
 
 #ifdef CONFIG_CMA
-		if ((alloc_flags & ALLOC_CMA) &&
-		    !list_empty(&area->free_list[MIGRATE_CMA])) {
+		if (!list_empty(&area->free_list[MIGRATE_CMA]))
 			return true;
-		}
 #endif
 		if (alloc_harder &&
 			!list_empty(&area->free_list[MIGRATE_HIGHATOMIC]))
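The two hunks above change both halves of __zone_watermark_ok(): free CMA pages are no longer subtracted from free_pages before the order-0 comparison, and a non-empty MIGRATE_CMA free list now satisfies the high-order scan unconditionally. A simplified userspace model of the resulting scan follows; the enum values and struct are illustrative, not the kernel's definitions.

/*
 * Hedged sketch of the high-order fallback scan after this change:
 * with the ALLOC_CMA gate gone, CMA pages always count.
 */
#include <stdbool.h>
#include <stdio.h>

enum { MT_UNMOVABLE, MT_MOVABLE, MT_RECLAIMABLE,
       MT_PCPTYPES, MT_CMA = MT_PCPTYPES, MT_HIGHATOMIC, MT_TYPES };

struct free_area_model {
	bool nonempty[MT_TYPES];	/* stand-in for !list_empty() */
};

static bool order_has_page(const struct free_area_model *area,
			   bool alloc_harder)
{
	for (int mt = 0; mt < MT_PCPTYPES; mt++)
		if (area->nonempty[mt])
			return true;
	if (area->nonempty[MT_CMA])	/* no ALLOC_CMA gate any more */
		return true;
	if (alloc_harder && area->nonempty[MT_HIGHATOMIC])
		return true;
	return false;
}

int main(void)
{
	/* Only CMA pages left at this order: the scan now succeeds. */
	struct free_area_model area = { .nonempty = { [MT_CMA] = true } };

	printf("%s\n", order_has_page(&area, false) ? "ok" : "not ok");
	return 0;
}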
@@ -3224,13 +3216,6 @@ static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
 		unsigned long mark, int classzone_idx, unsigned int alloc_flags)
 {
 	long free_pages = zone_page_state(z, NR_FREE_PAGES);
-	long cma_pages = 0;
-
-#ifdef CONFIG_CMA
-	/* If allocation can't use CMA areas don't use free CMA pages */
-	if (!(alloc_flags & ALLOC_CMA))
-		cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES);
-#endif
 
 	/*
 	 * Fast check for order-0 only. If this fails then the reserves
@@ -3239,7 +3224,7 @@ static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
 	 * the caller is !atomic then it'll uselessly search the free
 	 * list. That corner case is then slower but it is harmless.
 	 */
-	if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx])
+	if (!order && free_pages > mark + z->lowmem_reserve[classzone_idx])
 		return true;
 
 	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
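With cma_pages removed, the zone_watermark_fast() order-0 path reduces to a single comparison against raw NR_FREE_PAGES. A minimal sketch of what the fast check now computes, using made-up numbers:

/*
 * Hedged sketch of the simplified fast path. Illustrative names and
 * values only; the kernel reads these from per-zone counters.
 */
#include <stdbool.h>
#include <stdio.h>

static bool watermark_fast(long free_pages, long mark, long lowmem_reserve)
{
	/* Previously: (free_pages - cma_pages) > mark + reserve. */
	return free_pages > mark + lowmem_reserve;
}

int main(void)
{
	long free_pages = 5000, mark = 2048, lowmem_reserve = 1000;

	/* A zone whose free pages are mostly CMA now passes this check. */
	printf("fast path %s\n",
	       watermark_fast(free_pages, mark, lowmem_reserve)
	       ? "passes" : "falls back to __zone_watermark_ok()");
	return 0;
}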
@@ -3875,10 +3860,6 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 	} else if (unlikely(rt_task(current)) && !in_interrupt())
 		alloc_flags |= ALLOC_HARDER;
 
-#ifdef CONFIG_CMA
-	if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
-		alloc_flags |= ALLOC_CMA;
-#endif
 	return alloc_flags;
 }
 
@@ -4345,9 +4326,6 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
 	if (should_fail_alloc_page(gfp_mask, order))
 		return false;
 
-	if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
-		*alloc_flags |= ALLOC_CMA;
-
 	return true;
 }
 
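Taken together, the last two hunks remove the only places that set ALLOC_CMA for movable allocations, so none of the watermark helpers ever sees that flag. A small sketch of the resulting flag derivation; the macro value and names are illustrative, not the kernel's.

/*
 * Hedged sketch of the caller-side effect: MIGRATE_MOVABLE no longer
 * maps to ALLOC_CMA in gfp_to_alloc_flags()/prepare_alloc_pages().
 */
#include <stdio.h>

#define MODEL_ALLOC_HARDER	0x01	/* illustrative, not the kernel value */

enum mt_model { MODEL_UNMOVABLE, MODEL_MOVABLE };

static unsigned int to_alloc_flags(enum mt_model mt, int is_rt_task)
{
	unsigned int flags = 0;

	if (is_rt_task)
		flags |= MODEL_ALLOC_HARDER;
	/* Removed by this patch: if (mt == MOVABLE) flags |= ALLOC_CMA; */
	(void)mt;
	return flags;
}

int main(void)
{
	printf("movable alloc_flags: %#x\n",
	       to_alloc_flags(MODEL_MOVABLE, 0));
	return 0;
}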