@@ -3855,60 +3855,77 @@ got_pg:
 	return page;
 }
 
-/*
- * This is the 'heart' of the zoned buddy allocator.
- */
-struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
-			struct zonelist *zonelist, nodemask_t *nodemask)
+static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
+		struct zonelist *zonelist, nodemask_t *nodemask,
+		struct alloc_context *ac, gfp_t *alloc_mask,
+		unsigned int *alloc_flags)
 {
-	struct page *page;
-	unsigned int alloc_flags = ALLOC_WMARK_LOW;
-	gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
-	struct alloc_context ac = {
-		.high_zoneidx = gfp_zone(gfp_mask),
-		.zonelist = zonelist,
-		.nodemask = nodemask,
-		.migratetype = gfpflags_to_migratetype(gfp_mask),
-	};
+	ac->high_zoneidx = gfp_zone(gfp_mask);
+	ac->zonelist = zonelist;
+	ac->nodemask = nodemask;
+	ac->migratetype = gfpflags_to_migratetype(gfp_mask);
 
 	if (cpusets_enabled()) {
-		alloc_mask |= __GFP_HARDWALL;
-		alloc_flags |= ALLOC_CPUSET;
-		if (!ac.nodemask)
-			ac.nodemask = &cpuset_current_mems_allowed;
+		*alloc_mask |= __GFP_HARDWALL;
+		*alloc_flags |= ALLOC_CPUSET;
+		if (!ac->nodemask)
+			ac->nodemask = &cpuset_current_mems_allowed;
 	}
 
-	gfp_mask &= gfp_allowed_mask;
-
 	lockdep_trace_alloc(gfp_mask);
 
 	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
 
 	if (should_fail_alloc_page(gfp_mask, order))
-		return NULL;
+		return false;
 
 	/*
 	 * Check the zones suitable for the gfp_mask contain at least one
 	 * valid zone. It's possible to have an empty zonelist as a result
 	 * of __GFP_THISNODE and a memoryless node
 	 */
-	if (unlikely(!zonelist->_zonerefs->zone))
-		return NULL;
+	if (unlikely(!ac->zonelist->_zonerefs->zone))
+		return false;
 
-	if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
-		alloc_flags |= ALLOC_CMA;
+	if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
+		*alloc_flags |= ALLOC_CMA;
+
+	return true;
+}
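
The new prepare_alloc_pages() hoists everything that only needs to run once per allocation request out of the allocator body: filling in the alloc_context, the cpuset and CMA flag adjustments, and the early failure checks. Because the boolean return slot now carries the fail-early signal, the adjusted masks travel back to the caller through pointer out-parameters. A minimal userspace sketch of that calling convention follows; the demo_* names and DEMO_* flag values are purely illustrative stand-ins for __GFP_HARDWALL and ALLOC_CPUSET, not the kernel's definitions:

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical flag bits; the real values live in gfp.h and
	 * mm/internal.h. */
	#define DEMO_GFP_HARDWALL	0x01u
	#define DEMO_ALLOC_CPUSET	0x02u

	/* Mirrors the shape of the prepare-style helper: the return value
	 * is reserved for the early-failure signal, so both adjusted masks
	 * are written back through out-parameters. */
	static bool demo_prepare(unsigned int gfp_mask,
				 unsigned int *alloc_mask,
				 unsigned int *alloc_flags)
	{
		*alloc_mask |= DEMO_GFP_HARDWALL;
		*alloc_flags |= DEMO_ALLOC_CPUSET;
		return gfp_mask != 0;	/* stand-in for should_fail_alloc_page() */
	}

	int main(void)
	{
		unsigned int gfp_mask = 0x10;
		unsigned int alloc_mask = gfp_mask, alloc_flags = 0;

		if (!demo_prepare(gfp_mask, &alloc_mask, &alloc_flags))
			return 1;	/* bail before touching any zone */
		printf("alloc_mask=%#x alloc_flags=%#x\n", alloc_mask, alloc_flags);
		return 0;
	}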
+
+/* Determine whether to spread dirty pages and what the first usable zone is */
+static inline void finalise_ac(gfp_t gfp_mask,
+		unsigned int order, struct alloc_context *ac)
+{
 	/* Dirty zone balancing only done in the fast path */
-	ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
+	ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
 
 	/*
 	 * The preferred zone is used for statistics but crucially it is
 	 * also used as the starting point for the zonelist iterator. It
 	 * may get reset for allocations that ignore memory policies.
 	 */
-	ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
-					ac.high_zoneidx, ac.nodemask);
+	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
+					ac->high_zoneidx, ac->nodemask);
+}
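
finalise_ac() covers the remaining setup visible in the hunk: the dirty-page spreading decision and the preferred zoneref that seeds the zonelist iterator. For orientation, nothing calls the rewritten entry point below directly; callers reach it through wrappers such as alloc_pages(). A hedged usage sketch: the demo_* helpers are illustrative only, while alloc_pages(), __free_pages() and GFP_KERNEL are the real page allocator API:

	#include <linux/gfp.h>
	#include <linux/mm.h>

	/* Grab one order-0 page; GFP_KERNEL includes __GFP_DIRECT_RECLAIM,
	 * so this may sleep (see the might_sleep_if() check above). */
	static struct page *demo_get_scratch_page(void)
	{
		return alloc_pages(GFP_KERNEL, 0);
	}

	static void demo_put_scratch_page(struct page *page)
	{
		if (page)
			__free_pages(page, 0);
	}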
+
+/*
+ * This is the 'heart' of the zoned buddy allocator.
+ */
+struct page *
+__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
+			struct zonelist *zonelist, nodemask_t *nodemask)
+{
+	struct page *page;
+	unsigned int alloc_flags = ALLOC_WMARK_LOW;
+	gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
+	struct alloc_context ac = { };
+
+	gfp_mask &= gfp_allowed_mask;
+	if (!prepare_alloc_pages(gfp_mask, order, zonelist, nodemask, &ac, &alloc_mask, &alloc_flags))
+		return NULL;
+
+	finalise_ac(gfp_mask, order, &ac);
 	if (!ac.preferred_zoneref->zone) {
 		page = NULL;
 		/*