@@ -3398,31 +3398,26 @@ retry_cpuset:
 				ac.nodemask, &ac.preferred_zone);
 	if (!ac.preferred_zone) {
 		page = NULL;
-		goto out;
+		goto no_zone;
 	}
 
 	ac.classzone_idx = zonelist_zone_idx(preferred_zoneref);
 
 	/* First allocation attempt */
 	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
-	if (unlikely(!page)) {
-		/*
-		 * Runtime PM, block IO and its error handling path
-		 * can deadlock because I/O on the device might not
-		 * complete.
-		 */
-		alloc_mask = memalloc_noio_flags(gfp_mask);
-		ac.spread_dirty_pages = false;
-
-		page = __alloc_pages_slowpath(alloc_mask, order, &ac);
-	}
+	if (likely(page))
+		goto out;
 
-	if (kmemcheck_enabled && page)
-		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
+	/*
+	 * Runtime PM, block IO and its error handling path can deadlock
+	 * because I/O on the device might not complete.
+	 */
+	alloc_mask = memalloc_noio_flags(gfp_mask);
+	ac.spread_dirty_pages = false;
 
-	trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
+	page = __alloc_pages_slowpath(alloc_mask, order, &ac);
 
-out:
+no_zone:
 	/*
 	 * When updating a task's mems_allowed, it is possible to race with
 	 * parallel threads in such a way that an allocation can fail while
@@ -3434,6 +3429,12 @@ out:
 		goto retry_cpuset;
 	}
 
+out:
+	if (kmemcheck_enabled && page)
+		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
+
+	trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
+
 	return page;
 }
 EXPORT_SYMBOL(__alloc_pages_nodemask);
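
For reference, the restructuring above follows a common fast-path pattern: try
the cheap, likely-successful path first and jump straight to a shared epilogue,
so the unlikely slow path falls out of line and the common case executes with
minimal branching. A minimal sketch of the pattern in plain C (not kernel
code; fastpath_alloc() and slowpath_alloc() are hypothetical stand-ins for
get_page_from_freelist() and __alloc_pages_slowpath()):

	void *fastpath_alloc(void);	/* hypothetical: cheap, usually succeeds */
	void *slowpath_alloc(void);	/* hypothetical: expensive fallback */

	void *alloc_fast_then_slow(void)
	{
		void *p = fastpath_alloc();

		if (p)
			goto out;	/* likely case: skip all slow-path setup */

		p = slowpath_alloc();	/* unlikely case handled out of line */
	out:
		/* epilogue shared by both paths (tracing, accounting, ...) */
		return p;
	}

In the patched function the failure path also falls through the no_zone label
before reaching out:, so the cpuset-retry check is only evaluated when the
fast path did not already succeed.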