@@ -3577,6 +3577,14 @@ retry_cpuset:
 	no_progress_loops = 0;
 	compact_priority = DEF_COMPACT_PRIORITY;
 	cpuset_mems_cookie = read_mems_allowed_begin();
+
+	/*
+	 * The fast path uses conservative alloc_flags to succeed only until
+	 * kswapd needs to be woken up, and to avoid the cost of setting up
+	 * alloc_flags precisely. So we do that now.
+	 */
+	alloc_flags = gfp_to_alloc_flags(gfp_mask);
+
 	/*
 	 * We need to recalculate the starting point for the zonelist iterator
 	 * because we might have used different nodemask in the fast path, or
@@ -3588,14 +3596,6 @@ retry_cpuset:
 	if (!ac->preferred_zoneref->zone)
 		goto nopage;
 
-
-	/*
-	 * The fast path uses conservative alloc_flags to succeed only until
-	 * kswapd needs to be woken up, and to avoid the cost of setting up
-	 * alloc_flags precisely. So we do that now.
-	 */
-	alloc_flags = gfp_to_alloc_flags(gfp_mask);
-
 	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
 		wake_all_kswapds(order, ac);
 
@@ -3672,35 +3672,21 @@ retry:
 		goto got_pg;
 
 	/* Caller is not willing to reclaim, we can't balance anything */
-	if (!can_direct_reclaim) {
-		/*
-		 * All existing users of the __GFP_NOFAIL are blockable, so warn
-		 * of any new users that actually allow this type of allocation
-		 * to fail.
-		 */
-		WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL);
+	if (!can_direct_reclaim)
 		goto nopage;
-	}
 
-	/* Avoid recursion of direct reclaim */
-	if (current->flags & PF_MEMALLOC) {
-		/*
-		 * __GFP_NOFAIL request from this context is rather bizarre
-		 * because we cannot reclaim anything and only can loop waiting
-		 * for somebody to do a work for us.
-		 */
-		if (WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
-			cond_resched();
-			goto retry;
-		}
-		goto nopage;
+	/* Make sure we know about allocations which stall for too long */
+	if (time_after(jiffies, alloc_start + stall_timeout)) {
+		warn_alloc(gfp_mask, ac->nodemask,
+			"page allocation stalls for %ums, order:%u",
+			jiffies_to_msecs(jiffies-alloc_start), order);
+		stall_timeout += 10 * HZ;
 	}
 
-	/* Avoid allocations with no watermarks from looping endlessly */
-	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
+	/* Avoid recursion of direct reclaim */
+	if (current->flags & PF_MEMALLOC)
 		goto nopage;
 
-
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
 							&did_some_progress);
@@ -3724,14 +3710,6 @@ retry:
 	if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_REPEAT))
 		goto nopage;
 
-	/* Make sure we know about allocations which stall for too long */
-	if (time_after(jiffies, alloc_start + stall_timeout)) {
-		warn_alloc(gfp_mask, ac->nodemask,
-			"page allocation stalls for %ums, order:%u",
-			jiffies_to_msecs(jiffies-alloc_start), order);
-		stall_timeout += 10 * HZ;
-	}
-
 	if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
 				 did_some_progress > 0, &no_progress_loops))
 		goto retry;
@@ -3760,6 +3738,10 @@ retry:
 	if (page)
 		goto got_pg;
 
+	/* Avoid allocations with no watermarks from looping endlessly */
+	if (test_thread_flag(TIF_MEMDIE))
+		goto nopage;
+
 	/* Retry as long as the OOM killer is making progress */
 	if (did_some_progress) {
 		no_progress_loops = 0;
@@ -3777,6 +3759,37 @@ nopage:
 	if (read_mems_allowed_retry(cpuset_mems_cookie))
 		goto retry_cpuset;
 
+	/*
+	 * Make sure that a __GFP_NOFAIL request doesn't leak out and that
+	 * we always retry
+	 */
+	if (gfp_mask & __GFP_NOFAIL) {
+		/*
+		 * All existing users of __GFP_NOFAIL are blockable, so warn
+		 * of any new users that actually require GFP_NOWAIT
+		 */
+		if (WARN_ON_ONCE(!can_direct_reclaim))
+			goto fail;
+
+		/*
+		 * A __GFP_NOFAIL request from PF_MEMALLOC context is rather
+		 * bizarre because we cannot reclaim anything and can only
+		 * loop waiting for somebody to do the work for us
+		 */
+		WARN_ON_ONCE(current->flags & PF_MEMALLOC);
+
+		/*
+		 * Non-failing costly orders are a hard requirement which
+		 * we are not prepared for, so warn about these users so
+		 * that we can identify them and convert them to something
+		 * else.
+		 */
+		WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
+
+		cond_resched();
+		goto retry;
+	}
+fail:
 	warn_alloc(gfp_mask, ac->nodemask,
 			"page allocation failure: order:%u", order);
 got_pg:
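
For anyone tracing the new control flow: after this change every __GFP_NOFAIL
decision lives in one place under the nopage: label instead of being scattered
through the retry loop, so the hot path only examines the flag once the
allocator has otherwise decided to give up. The sketch below is a minimal
userspace mock of that consolidated block; the flag values, the helper name
nofail_should_retry(), and the warning strings are stand-ins for illustration,
not kernel code.

#include <stdbool.h>
#include <stdio.h>

#define __GFP_DIRECT_RECLAIM	(1u << 0)
#define __GFP_NOFAIL		(1u << 1)
#define PAGE_ALLOC_COSTLY_ORDER	3

/* One pass over the nopage: block; returns true when the caller must retry. */
static bool nofail_should_retry(unsigned int gfp_mask, unsigned int order,
				bool pf_memalloc)
{
	bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;

	if (!(gfp_mask & __GFP_NOFAIL))
		return false;	/* ordinary request: warn_alloc() and fail */

	/* mirrors WARN_ON_ONCE(!can_direct_reclaim): atomic NOFAIL may fail */
	if (!can_direct_reclaim) {
		fprintf(stderr, "__GFP_NOFAIL without direct reclaim\n");
		return false;	/* goto fail */
	}

	/* mirrors WARN_ON_ONCE(current->flags & PF_MEMALLOC) */
	if (pf_memalloc)
		fprintf(stderr, "__GFP_NOFAIL from reclaim context\n");

	/* mirrors WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER) */
	if (order > PAGE_ALLOC_COSTLY_ORDER)
		fprintf(stderr, "costly __GFP_NOFAIL allocation\n");

	return true;		/* cond_resched(); goto retry */
}

int main(void)
{
	/* Blockable NOFAIL request: never leaves the retry loop. */
	printf("retry=%d\n", nofail_should_retry(
			__GFP_NOFAIL | __GFP_DIRECT_RECLAIM, 0, false));
	/* Atomic-style NOFAIL request: warns and is allowed to fail. */
	printf("retry=%d\n", nofail_should_retry(__GFP_NOFAIL, 0, false));
	return 0;
}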