@@ -277,10 +277,11 @@ radix_tree_node_alloc(struct radix_tree_root *root)
 
 		/*
 		 * Even if the caller has preloaded, try to allocate from the
-		 * cache first for the new node to get accounted.
+		 * cache first for the new node to get accounted to the memory
+		 * cgroup.
 		 */
 		ret = kmem_cache_alloc(radix_tree_node_cachep,
-				       gfp_mask | __GFP_ACCOUNT | __GFP_NOWARN);
+				       gfp_mask | __GFP_NOWARN);
 		if (ret)
 			goto out;
 
@@ -303,8 +304,7 @@ radix_tree_node_alloc(struct radix_tree_root *root)
 		kmemleak_update_trace(ret);
 		goto out;
 	}
-	ret = kmem_cache_alloc(radix_tree_node_cachep,
-			       gfp_mask | __GFP_ACCOUNT);
+	ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 out:
 	BUG_ON(radix_tree_is_internal_node(ret));
 	return ret;
@@ -351,6 +351,12 @@ static int __radix_tree_preload(gfp_t gfp_mask, int nr)
 	struct radix_tree_node *node;
 	int ret = -ENOMEM;
 
+	/*
+	 * Nodes preloaded by one cgroup can be used by another cgroup, so
+	 * they should never be accounted to any particular memory cgroup.
+	 */
+	gfp_mask &= ~__GFP_ACCOUNT;
+
 	preempt_disable();
 	rtp = this_cpu_ptr(&radix_tree_preloads);
 	while (rtp->nr < nr) {
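
For context, here is a minimal caller-side sketch (illustrative only, not part of this patch) of how the new behavior is meant to be used: the per-tree gfp mask opts in to memcg accounting with __GFP_ACCOUNT, while preloaded nodes stay unaccounted because __radix_tree_preload() now clears the flag. The example_* names and the lock are hypothetical; the radix tree calls are the existing kernel API.

#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static DEFINE_SPINLOCK(example_lock);
static struct radix_tree_root example_tree;

static void example_init(void)
{
	/*
	 * __GFP_ACCOUNT in the per-tree mask opts this tree's nodes in to
	 * memcg accounting; GFP_ATOMIC because insertions run under a lock.
	 */
	INIT_RADIX_TREE(&example_tree, GFP_ATOMIC | __GFP_ACCOUNT);
}

static int example_insert(unsigned long index, void *item)
{
	int err;

	/*
	 * Preload outside the lock. Per this patch, preloaded nodes sit in
	 * a shared per-CPU pool and are never charged to a cgroup.
	 */
	err = radix_tree_preload(GFP_KERNEL);
	if (err)
		return err;

	spin_lock(&example_lock);
	/*
	 * If the accounted allocation attempt in radix_tree_node_alloc()
	 * succeeds, the node is charged to the current memory cgroup;
	 * otherwise the insert falls back to the unaccounted preload pool.
	 */
	err = radix_tree_insert(&example_tree, index, item);
	spin_unlock(&example_lock);
	radix_tree_preload_end();
	return err;
}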