@@ -1312,17 +1312,26 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 /*
  * Slab allocation and freeing
  */
-static inline struct page *alloc_slab_page(gfp_t flags, int node,
-                                        struct kmem_cache_order_objects oo)
+static inline struct page *alloc_slab_page(struct kmem_cache *s,
+                gfp_t flags, int node, struct kmem_cache_order_objects oo)
 {
+        struct page *page;
         int order = oo_order(oo);
 
         flags |= __GFP_NOTRACK;
 
+        if (memcg_charge_slab(s, flags, order))
+                return NULL;
+
         if (node == NUMA_NO_NODE)
-                return alloc_pages(flags, order);
+                page = alloc_pages(flags, order);
         else
-                return alloc_pages_exact_node(node, flags, order);
+                page = alloc_pages_exact_node(node, flags, order);
+
+        if (!page)
+                memcg_uncharge_slab(s, order);
+
+        return page;
 }
 
 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)

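[ For readability, here is alloc_slab_page() as it reads with the hunk
  above applied. This is assembled directly from the context and + lines;
  only the comments spelling out the charge/uncharge ordering are added. ]

static inline struct page *alloc_slab_page(struct kmem_cache *s,
                gfp_t flags, int node, struct kmem_cache_order_objects oo)
{
        struct page *page;
        int order = oo_order(oo);

        flags |= __GFP_NOTRACK;

        /* Charge the slab pages to the cache's memcg before allocating;
         * if the kmem limit is exceeded, fail the slab allocation. */
        if (memcg_charge_slab(s, flags, order))
                return NULL;

        if (node == NUMA_NO_NODE)
                page = alloc_pages(flags, order);
        else
                page = alloc_pages_exact_node(node, flags, order);

        /* The charge succeeded but the page allocation did not:
         * give the charge back so the counter is not leaked. */
        if (!page)
                memcg_uncharge_slab(s, order);

        return page;
}
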
@@ -1344,7 +1353,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
          */
         alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
 
-        page = alloc_slab_page(alloc_gfp, node, oo);
+        page = alloc_slab_page(s, alloc_gfp, node, oo);
         if (unlikely(!page)) {
                 oo = s->min;
                 alloc_gfp = flags;
@@ -1352,7 +1361,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
                  * Allocation may have failed due to fragmentation.
                  * Try a lower order alloc if possible
                  */
-                page = alloc_slab_page(alloc_gfp, node, oo);
+                page = alloc_slab_page(s, alloc_gfp, node, oo);
 
                 if (page)
                         stat(s, ORDER_FALLBACK);
@@ -1468,7 +1477,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
         page_mapcount_reset(page);
         if (current->reclaim_state)
                 current->reclaim_state->reclaimed_slab += pages;
-        __free_memcg_kmem_pages(page, order);
+        __free_pages(page, order);
+        memcg_uncharge_slab(s, order);
 }
 
 #define need_reserve_slab_rcu \
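
[ For context, memcg_charge_slab()/memcg_uncharge_slab() are the mm/slab.h
  helpers this patch builds on. A minimal sketch of their likely shape is
  below; the exact guard conditions and underlying charge primitives are
  assumptions for illustration, not taken from the hunks above. ]

static __always_inline int memcg_charge_slab(struct kmem_cache *s,
                                             gfp_t gfp, int order)
{
        /* Assumed guards: kmem accounting must be enabled, and only
         * per-memcg caches are charged; root caches are not accounted. */
        if (!memcg_kmem_enabled())
                return 0;
        if (is_root_cache(s))
                return 0;
        /* Charge PAGE_SIZE << order bytes to the owning memcg's kmem
         * counter; a nonzero return fails alloc_slab_page() above. */
        return memcg_charge_kmem(s->memcg_params->memcg, gfp,
                                 PAGE_SIZE << order);
}

static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
        if (!memcg_kmem_enabled())
                return;
        if (is_root_cache(s))
                return;
        memcg_uncharge_kmem(s->memcg_params->memcg, PAGE_SIZE << order);
}

Note that __free_slab() now pairs a plain __free_pages() with an explicit
memcg_uncharge_slab(), replacing __free_memcg_kmem_pages(): the charge is
tracked per kmem_cache rather than per page, so the free path must name
the cache that was charged.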