@@ -3350,7 +3350,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	struct page *page;
 	unsigned int cpuset_mems_cookie;
 	unsigned int alloc_flags = ALLOC_WMARK_LOW|ALLOC_FAIR;
-	gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
+	gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
 	struct alloc_context ac = {
 		.high_zoneidx = gfp_zone(gfp_mask),
 		.zonelist = zonelist,
@@ -3359,6 +3359,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	};
 
 	if (cpusets_enabled()) {
+		alloc_mask |= __GFP_HARDWALL;
 		alloc_flags |= ALLOC_CPUSET;
 		if (!ac.nodemask)
 			ac.nodemask = &cpuset_current_mems_allowed;
@@ -3401,7 +3402,6 @@ retry_cpuset:
 	ac.classzone_idx = zonelist_zone_idx(preferred_zoneref);
 
 	/* First allocation attempt */
-	alloc_mask = gfp_mask|__GFP_HARDWALL;
 	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
 	if (unlikely(!page)) {
 		/*
@@ -3427,8 +3427,10 @@ out:
 	 * the mask is being updated. If a page allocation is about to fail,
 	 * check if the cpuset changed during allocation and if so, retry.
 	 */
-	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
+	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) {
+		alloc_mask = gfp_mask;
 		goto retry_cpuset;
+	}
 
 	return page;
 }
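
For reference, a minimal userspace sketch of the pattern this patch applies: OR the conditional flag into the working mask once, before the retry loop, instead of on every allocation attempt, and restore the caller's original mask before retrying, since the retry label sits below the point where the flag was applied. All names in the sketch (sketch_mask_t, FLAG_HARDWALL, constraints_enabled, read_begin, try_alloc, retry_needed) are illustrative stand-ins, not the kernel's APIs.

/*
 * Minimal userspace sketch of the pattern above; all names are
 * illustrative stand-ins, not the kernel's APIs.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int sketch_mask_t;
#define FLAG_HARDWALL 0x1u

/* Stand-in for cpusets_enabled(); in the kernel this is behind a static branch. */
static bool constraints_enabled(void) { return true; }
/* Stand-in for read_mems_allowed_begin(). */
static int read_begin(void) { return 0; }
/* Stand-in for get_page_from_freelist(); always fails in this sketch. */
static void *try_alloc(sketch_mask_t mask) { (void)mask; return NULL; }
/* Stand-in for read_mems_allowed_retry(). */
static bool retry_needed(int cookie) { (void)cookie; return false; }

static void *alloc(sketch_mask_t mask)
{
	void *page;
	int cookie;
	/* First change: start the working mask from the caller's mask... */
	sketch_mask_t alloc_mask = mask;

	/* ...and apply the constraint flag once, outside the retry loop. */
	if (constraints_enabled())
		alloc_mask |= FLAG_HARDWALL;

retry:
	cookie = read_begin();
	page = try_alloc(alloc_mask);

	/*
	 * Second change: the retry re-enters below the flag application,
	 * so restore the caller's mask before looping, because code after
	 * the first attempt (the slowpath, in the kernel) may have
	 * overwritten alloc_mask.
	 */
	if (!page && retry_needed(cookie)) {
		alloc_mask = mask;
		goto retry;
	}
	return page;
}

int main(void)
{
	printf("page: %p\n", alloc(0));
	return 0;
}

The payoff is that the common fast path no longer pays for the flag manipulation on every call when the feature is disabled; the only extra work is the one-time reset of the mask on the (rare) retry path.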