@@ -3192,17 +3192,6 @@ retry:
 	 */
 	alloc_flags = gfp_to_alloc_flags(gfp_mask);
 
-	/*
-	 * Find the true preferred zone if the allocation is unconstrained by
-	 * cpusets.
-	 */
-	if (!(alloc_flags & ALLOC_CPUSET) && !ac->nodemask) {
-		struct zoneref *preferred_zoneref;
-		preferred_zoneref = first_zones_zonelist(ac->zonelist,
-				ac->high_zoneidx, NULL, &ac->preferred_zone);
-		ac->classzone_idx = zonelist_zone_idx(preferred_zoneref);
-	}
-
 	/* This is the last chance, in general, before the goto nopage. */
 	page = get_page_from_freelist(gfp_mask, order,
 				alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
@@ -3358,14 +3347,21 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	struct zoneref *preferred_zoneref;
 	struct page *page = NULL;
 	unsigned int cpuset_mems_cookie;
-	int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
+	int alloc_flags = ALLOC_WMARK_LOW|ALLOC_FAIR;
 	gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
 	struct alloc_context ac = {
 		.high_zoneidx = gfp_zone(gfp_mask),
+		.zonelist = zonelist,
 		.nodemask = nodemask,
 		.migratetype = gfpflags_to_migratetype(gfp_mask),
 	};
 
+	if (cpusets_enabled()) {
+		alloc_flags |= ALLOC_CPUSET;
+		if (!ac.nodemask)
+			ac.nodemask = &cpuset_current_mems_allowed;
+	}
+
 	gfp_mask &= gfp_allowed_mask;
 
 	lockdep_trace_alloc(gfp_mask);
@@ -3389,16 +3385,12 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 retry_cpuset:
 	cpuset_mems_cookie = read_mems_allowed_begin();
 
-	/* We set it here, as __alloc_pages_slowpath might have changed it */
-	ac.zonelist = zonelist;
-
 	/* Dirty zone balancing only done in the fast path */
 	ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
 
 	/* The preferred zone is used for statistics later */
 	preferred_zoneref = first_zones_zonelist(ac.zonelist, ac.high_zoneidx,
-				ac.nodemask ? : &cpuset_current_mems_allowed,
-				&ac.preferred_zone);
+				ac.nodemask, &ac.preferred_zone);
 	if (!ac.preferred_zone)
 		goto out;
 	ac.classzone_idx = zonelist_zone_idx(preferred_zoneref);