@@ -191,6 +191,15 @@ radix_tree_node_alloc(struct radix_tree_root *root)
 	if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
 		struct radix_tree_preload *rtp;
 
+		/*
+		 * Even if the caller has preloaded, try to allocate from the
+		 * cache first for the new node to get accounted.
+		 */
+		ret = kmem_cache_alloc(radix_tree_node_cachep,
+				       gfp_mask | __GFP_ACCOUNT | __GFP_NOWARN);
+		if (ret)
+			goto out;
+
 		/*
 		 * Provided the caller has preloaded here, we will always
 		 * succeed in getting a node here (and never reach
@@ -208,10 +217,11 @@ radix_tree_node_alloc(struct radix_tree_root *root)
 		 * for debugging.
 		 */
 		kmemleak_update_trace(ret);
+		goto out;
 	}
-	if (ret == NULL)
-		ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
-
+	ret = kmem_cache_alloc(radix_tree_node_cachep,
+			       gfp_mask | __GFP_ACCOUNT);
+out:
 	BUG_ON(radix_tree_is_indirect_ptr(ret));
 	return ret;
 }
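
The net effect of the two hunks is that radix tree nodes are always allocated
with __GFP_ACCOUNT, so they get charged to the allocating task's memory cgroup,
and the old "if (ret == NULL)" fallback is folded into a single out: label.
For reference, a rough sketch of how the function reads with both hunks applied
(the declarations at the top and the preload-cache pop between the hunks are
not part of the diff above, so they are assumed or elided here, not quoted from
the kernel source):

static struct radix_tree_node *
radix_tree_node_alloc(struct radix_tree_root *root)
{
	struct radix_tree_node *ret = NULL;
	gfp_t gfp_mask = root_gfp_mask(root);	/* assumed from the unpatched function */

	if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
		struct radix_tree_preload *rtp;

		/*
		 * Opportunistic accounted allocation first, so the node is
		 * charged to the caller's memory cgroup; __GFP_NOWARN keeps
		 * the expected failures of this attempt quiet.
		 */
		ret = kmem_cache_alloc(radix_tree_node_cachep,
				       gfp_mask | __GFP_ACCOUNT | __GFP_NOWARN);
		if (ret)
			goto out;

		/*
		 * Fall back to popping a node off the per-cpu preload cache
		 * into ret (unchanged code elided between the two hunks).
		 */

		kmemleak_update_trace(ret);
		goto out;
	}
	/* Blocking path: a plain accounted allocation. */
	ret = kmem_cache_alloc(radix_tree_node_cachep,
			       gfp_mask | __GFP_ACCOUNT);
out:
	BUG_ON(radix_tree_is_indirect_ptr(ret));
	return ret;
}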