@@ -857,6 +857,11 @@ static inline void *____cache_alloc_node(struct kmem_cache *cachep,
 	return NULL;
 }
 
+static inline gfp_t gfp_exact_node(gfp_t flags)
+{
+	return flags;
+}
+
 #else	/* CONFIG_NUMA */
 
 static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
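When CONFIG_NUMA is off, the helper is deliberately a no-op: there is only one node to allocate from, so the mask needs no adjustment.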
@@ -1023,6 +1028,15 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 
 	return __cache_free_alien(cachep, objp, node, page_node);
 }
+
+/*
+ * Construct gfp mask to allocate from a specific node but do not invoke reclaim
+ * or warn about failures.
+ */
+static inline gfp_t gfp_exact_node(gfp_t flags)
+{
+	return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~__GFP_WAIT;
+}
 #endif
 
 /*
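The NUMA version pins the allocation with __GFP_THISNODE so it cannot fall back to another node, adds __GFP_NOWARN because failure here is expected and handled by the callers, and clears __GFP_WAIT so the attempt can neither sleep nor enter reclaim. The GFP_THISNODE composite it replaces was __GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY; with __GFP_WAIT cleared there is no reclaim pass left to retry, so __GFP_NORETRY is no longer needed.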
@@ -2825,7 +2839,7 @@ alloc_done:
 	if (unlikely(!ac->avail)) {
 		int x;
 force_grow:
-		x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
+		x = cache_grow(cachep, gfp_exact_node(flags), node, NULL);
 
 		/* cache_grow can reenable interrupts, then ac could change. */
 		ac = cpu_cache_get(cachep);
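This and the remaining hunks are the mechanical part of the change: every call site that previously OR'ed GFP_THISNODE into the caller's flags now builds its mask through gfp_exact_node(flags) instead.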
@@ -3019,7 +3033,7 @@ retry:
 			get_node(cache, nid) &&
 			get_node(cache, nid)->free_objects) {
 				obj = ____cache_alloc_node(cache,
-					flags | GFP_THISNODE, nid);
+					gfp_exact_node(flags), nid);
 				if (obj)
 					break;
 		}
@@ -3047,7 +3061,7 @@ retry:
 			nid = page_to_nid(page);
 			if (cache_grow(cache, flags, nid, page)) {
 				obj = ____cache_alloc_node(cache,
-					flags | GFP_THISNODE, nid);
+					gfp_exact_node(flags), nid);
 				if (!obj)
 					/*
 					 * Another processor may allocate the
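Note that the cache_grow() call in this hunk keeps plain flags: it is handed a page that has already been allocated, so no node restriction is needed there; only the ____cache_alloc_node() retry switches to the helper.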
@@ -3118,7 +3132,7 @@ retry:
 
 must_grow:
 	spin_unlock(&n->list_lock);
-	x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
+	x = cache_grow(cachep, gfp_exact_node(flags), nodeid, NULL);
 	if (x)
 		goto retry;
 
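For reference, a sketch (not part of the patch) of what the masks work out to for a GFP_KERNEL caller, assuming this era's definitions GFP_KERNEL == __GFP_WAIT | __GFP_IO | __GFP_FS and GFP_THISNODE == __GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY:

/*
 * old:  GFP_KERNEL | GFP_THISNODE
 *       = __GFP_WAIT | __GFP_IO | __GFP_FS |
 *         __GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY
 *
 * new:  gfp_exact_node(GFP_KERNEL)
 *       = (GFP_KERNEL | __GFP_THISNODE | __GFP_NOWARN) & ~__GFP_WAIT
 *       = __GFP_IO | __GFP_FS | __GFP_THISNODE | __GFP_NOWARN
 */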