@@ -169,12 +169,12 @@ void pm_restrict_gfp_mask(void)
 	WARN_ON(!mutex_is_locked(&pm_mutex));
 	WARN_ON(saved_gfp_mask);
 	saved_gfp_mask = gfp_allowed_mask;
-	gfp_allowed_mask &= ~GFP_IOFS;
+	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
 }
 
 bool pm_suspended_storage(void)
 {
-	if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
+	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
 		return false;
 	return true;
 }
@@ -2183,7 +2183,7 @@ static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 		return false;
 	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
 		return false;
-	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
+	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_DIRECT_RECLAIM))
 		return false;
 
 	return should_fail(&fail_page_alloc.attr, 1 << order);
@@ -2685,7 +2685,7 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
 		if (test_thread_flag(TIF_MEMDIE) ||
 		    (current->flags & (PF_MEMALLOC | PF_EXITING)))
 			filter &= ~SHOW_MEM_FILTER_NODES;
-	if (in_interrupt() || !(gfp_mask & __GFP_WAIT))
+	if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
 		filter &= ~SHOW_MEM_FILTER_NODES;
 
 	if (fmt) {
@@ -2945,7 +2945,6 @@ static inline int
 gfp_to_alloc_flags(gfp_t gfp_mask)
 {
 	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
-	const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));
 
 	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
 	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
@@ -2954,11 +2953,11 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 	 * The caller may dip into page reserves a bit more if the caller
 	 * cannot run direct reclaim, or if the caller has realtime scheduling
 	 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
-	 * set both ALLOC_HARDER (atomic == true) and ALLOC_HIGH (__GFP_HIGH).
+	 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
 	 */
 	alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
 
-	if (atomic) {
+	if (gfp_mask & __GFP_ATOMIC) {
 		/*
 		 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
 		 * if it can't schedule.
@@ -2995,11 +2994,16 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
 	return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
 }
 
+static inline bool is_thp_gfp_mask(gfp_t gfp_mask)
+{
+	return (gfp_mask & (GFP_TRANSHUGE | __GFP_KSWAPD_RECLAIM)) == GFP_TRANSHUGE;
+}
+
 static inline struct page *
 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 						struct alloc_context *ac)
 {
-	const gfp_t wait = gfp_mask & __GFP_WAIT;
+	bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
 	struct page *page = NULL;
 	int alloc_flags;
 	unsigned long pages_reclaimed = 0;
@@ -3019,16 +3023,24 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		return NULL;
 	}
 
+	/*
+	 * We also sanity check to catch abuse of atomic reserves being used by
+	 * callers that are not in atomic context.
+	 */
+	if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
+				(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
+		gfp_mask &= ~__GFP_ATOMIC;
+
 	/*
 	 * If this allocation cannot block and it is for a specific node, then
 	 * fail early. There's no need to wakeup kswapd or retry for a
 	 * speculative node-specific allocation.
 	 */
-	if (IS_ENABLED(CONFIG_NUMA) && (gfp_mask & __GFP_THISNODE) && !wait)
+	if (IS_ENABLED(CONFIG_NUMA) && (gfp_mask & __GFP_THISNODE) && !can_direct_reclaim)
 		goto nopage;
 
 retry:
-	if (!(gfp_mask & __GFP_NO_KSWAPD))
+	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
 		wake_all_kswapds(order, ac);
 
 	/*
@@ -3071,8 +3083,8 @@ retry:
 		}
 	}
 
-	/* Atomic allocations - we can't balance anything */
-	if (!wait) {
+	/* Caller is not willing to reclaim, we can't balance anything */
+	if (!can_direct_reclaim) {
 		/*
 		 * All existing users of the deprecated __GFP_NOFAIL are
 		 * blockable, so warn of any new users that actually allow this
@@ -3102,7 +3114,7 @@ retry:
 		goto got_pg;
 
 	/* Checks for THP-specific high-order allocations */
-	if ((gfp_mask & GFP_TRANSHUGE) == GFP_TRANSHUGE) {
+	if (is_thp_gfp_mask(gfp_mask)) {
 		/*
 		 * If compaction is deferred for high-order allocations, it is
 		 * because sync compaction recently failed. If this is the case
@@ -3137,8 +3149,7 @@ retry:
 	 * fault, so use asynchronous memory compaction for THP unless it is
 	 * khugepaged trying to collapse.
 	 */
-	if ((gfp_mask & GFP_TRANSHUGE) != GFP_TRANSHUGE ||
-						(current->flags & PF_KTHREAD))
+	if (!is_thp_gfp_mask(gfp_mask) || (current->flags & PF_KTHREAD))
 		migration_mode = MIGRATE_SYNC_LIGHT;
 
 	/* Try direct reclaim and then allocating */
@@ -3209,7 +3220,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 
 	lockdep_trace_alloc(gfp_mask);
 
-	might_sleep_if(gfp_mask & __GFP_WAIT);
+	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
 
 	if (should_fail_alloc_page(gfp_mask, order))
 		return NULL;
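
For context, the distinction this patch builds on can be sketched as follows. This is an illustrative reconstruction of how the reworked flag groups fit together (roughly include/linux/gfp.h after the series), not a copy of the header; the underlying bit values and exact definitions may differ by kernel version.

/*
 * Illustrative sketch only: __GFP_WAIT is split into an explicit
 * "may enter direct reclaim" flag and a "may wake kswapd" flag, plus
 * a marker for callers that truly cannot sleep.
 */
#define __GFP_RECLAIM	(__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM)

/* The common groups then decompose roughly as: */
#define GFP_ATOMIC	(__GFP_HIGH | __GFP_ATOMIC | __GFP_KSWAPD_RECLAIM)	/* cannot sleep, may use reserves, can wake kswapd */
#define GFP_KERNEL	(__GFP_RECLAIM | __GFP_IO | __GFP_FS)			/* may direct reclaim and wake kswapd */
#define GFP_NOWAIT	(__GFP_KSWAPD_RECLAIM)					/* no direct reclaim, but kswapd may be woken */

With that split, is_thp_gfp_mask() above treats a mask as a THP allocation only when every GFP_TRANSHUGE bit is set and __GFP_KSWAPD_RECLAIM is clear, so otherwise-similar masks that do allow waking kswapd are not given the THP-specific compaction handling.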