|
@@ -2322,8 +2322,10 @@ static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
|
|
|
#endif /* CONFIG_FAIL_PAGE_ALLOC */
|
|
|
|
|
|
/*
|
|
|
- * Return true if free pages are above 'mark'. This takes into account the order
|
|
|
- * of the allocation.
|
|
|
+ * Return true if free base pages are above 'mark'. For high-order checks it
|
|
|
+ * will return true if the order-0 watermark is reached and there is at least
|
|
|
+ * one free page of a suitable size. Checking now avoids taking the zone lock
|
|
|
+ * to check in the allocation paths if no pages are free.
|
|
|
*/
|
|
|
static bool __zone_watermark_ok(struct zone *z, unsigned int order,
|
|
|
unsigned long mark, int classzone_idx, int alloc_flags,
|
|
@@ -2331,7 +2333,7 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
|
|
|
{
|
|
|
long min = mark;
|
|
|
int o;
|
|
|
- long free_cma = 0;
|
|
|
+ const int alloc_harder = (alloc_flags & ALLOC_HARDER);
|
|
|
|
|
|
/* free_pages may go negative - that's OK */
|
|
|
free_pages -= (1 << order) - 1;
|
|
@@ -2344,7 +2346,7 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
|
|
|
* the high-atomic reserves. This will over-estimate the size of the
|
|
|
* atomic reserve but it avoids a search.
|
|
|
*/
|
|
|
- if (likely(!(alloc_flags & ALLOC_HARDER)))
|
|
|
+ if (likely(!alloc_harder))
|
|
|
free_pages -= z->nr_reserved_highatomic;
|
|
|
else
|
|
|
min -= min / 4;
|
|
@@ -2352,22 +2354,45 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
|
|
|
#ifdef CONFIG_CMA
|
|
|
/* If allocation can't use CMA areas don't use free CMA pages */
|
|
|
if (!(alloc_flags & ALLOC_CMA))
|
|
|
- free_cma = zone_page_state(z, NR_FREE_CMA_PAGES);
|
|
|
+ free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
|
|
|
#endif
|
|
|
|
|
|
- if (free_pages - free_cma <= min + z->lowmem_reserve[classzone_idx])
|
|
|
+ /*
|
|
|
+ * Check watermarks for an order-0 allocation request. If these
|
|
|
+ * are not met, then a high-order request also cannot go ahead
|
|
|
+ * even if a suitable page happened to be free.
|
|
|
+ */
|
|
|
+ if (free_pages <= min + z->lowmem_reserve[classzone_idx])
|
|
|
return false;
|
|
|
- for (o = 0; o < order; o++) {
|
|
|
- /* At the next order, this order's pages become unavailable */
|
|
|
- free_pages -= z->free_area[o].nr_free << o;
|
|
|
|
|
|
- /* Require fewer higher order pages to be free */
|
|
|
- min >>= 1;
|
|
|
+ /* If this is an order-0 request then the watermark is fine */
|
|
|
+ if (!order)
|
|
|
+ return true;
|
|
|
+
|
|
|
+ /* For a high-order request, check at least one suitable page is free */
|
|
|
+ for (o = order; o < MAX_ORDER; o++) {
|
|
|
+ struct free_area *area = &z->free_area[o];
|
|
|
+ int mt;
|
|
|
+
|
|
|
+ if (!area->nr_free)
|
|
|
+ continue;
|
|
|
+
|
|
|
+ if (alloc_harder)
|
|
|
+ return true;
|
|
|
|
|
|
- if (free_pages <= min)
|
|
|
- return false;
|
|
|
+ for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
|
|
|
+ if (!list_empty(&area->free_list[mt]))
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+#ifdef CONFIG_CMA
|
|
|
+ if ((alloc_flags & ALLOC_CMA) &&
|
|
|
+ !list_empty(&area->free_list[MIGRATE_CMA])) {
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+#endif
|
|
|
}
|
|
|
- return true;
|
|
|
+ return false;
|
|
|
}
|
|
|
|
|
|
bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
|