@@ -4060,17 +4060,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	unsigned int cpuset_mems_cookie;
 	int reserve_flags;
 
-	/*
-	 * In the slowpath, we sanity check order to avoid ever trying to
-	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
-	 * be using allocators in order of preference for an area that is
-	 * too large.
-	 */
-	if (order >= MAX_ORDER) {
-		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
-		return NULL;
-	}
-
 	/*
 	 * We also sanity check to catch abuse of atomic reserves being used by
 	 * callers that are not in atomic context.
@@ -4364,6 +4353,15 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 	gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
 	struct alloc_context ac = { };
 
+	/*
+	 * There are several places where we assume that the order value is
+	 * sane, so bail out early if the request is out of bounds.
+	 */
+	if (unlikely(order >= MAX_ORDER)) {
+		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
+		return NULL;
+	}
+
 	gfp_mask &= gfp_allowed_mask;
 	alloc_mask = gfp_mask;
 	if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
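
The net effect of the patch is that the order sanity check now runs once at the
common entry point, __alloc_pages_nodemask(), before any allocation work, rather
than only in the slow path after the fast path has already failed. Below is a
minimal userspace sketch of the guard's behavior; MAX_ORDER and __GFP_NOWARN are
modeled as plain constants and WARN_ON_ONCE() as an fprintf(), so this is an
illustration of the check, not the kernel implementation.

#include <stdio.h>

#define MAX_ORDER	11	/* illustrative; the common kernel default at the time */
#define __GFP_NOWARN	0x1u	/* stand-in flag bit for this sketch */

/*
 * Mirror of the moved check: reject order >= MAX_ORDER up front,
 * warning unless the caller passed __GFP_NOWARN.
 */
static void *alloc_pages_sketch(unsigned int gfp_mask, unsigned int order)
{
	if (order >= MAX_ORDER) {
		if (!(gfp_mask & __GFP_NOWARN))
			fprintf(stderr, "bogus order %u\n", order);
		return NULL;
	}
	/* ... the real allocator would continue with the fast path here ... */
	return (void *)"page";
}

int main(void)
{
	printf("order 3:  %s\n", alloc_pages_sketch(0, 3) ? "page" : "NULL");
	printf("order 64: %s\n", alloc_pages_sketch(__GFP_NOWARN, 64) ? "page" : "NULL");
	return 0;
}

Checking at the entry point means both the fast path and the slow path can
assume a sane order, rather than relying on the fast path to fail cleanly for
an out-of-range request before the slow-path check is ever reached.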