@@ -749,6 +749,13 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
 }
 #endif
 
+struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
+void memcg_kmem_put_cache(struct kmem_cache *cachep);
+int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
+			    struct mem_cgroup *memcg);
+int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
+void memcg_kmem_uncharge(struct page *page, int order);
+
 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
 extern struct static_key_false memcg_kmem_enabled_key;
 
@@ -769,22 +776,6 @@ static inline bool memcg_kmem_enabled(void)
 	return static_branch_unlikely(&memcg_kmem_enabled_key);
 }
 
-/*
- * In general, we'll do everything in our power to not incur in any overhead
- * for non-memcg users for the kmem functions. Not even a function call, if we
- * can avoid it.
- *
- * Therefore, we'll inline all those functions so that in the best case, we'll
- * see that kmemcg is off for everybody and proceed quickly. If it is on,
- * we'll still do most of the flag checking inline. We check a lot of
- * conditions, but because they are pretty simple, they are expected to be
- * fast.
- */
-int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
-			      struct mem_cgroup *memcg);
-int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
-void __memcg_kmem_uncharge(struct page *page, int order);
-
 /*
  * helper for accessing a memcg's index. It will be used as an index in the
  * child cache array in kmem_cache, and also to derive its name. This function
@@ -795,67 +786,6 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
 	return memcg ? memcg->kmemcg_id : -1;
 }
 
-struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);
-void __memcg_kmem_put_cache(struct kmem_cache *cachep);
-
-static inline bool __memcg_kmem_bypass(void)
-{
-	if (!memcg_kmem_enabled())
-		return true;
-	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
-		return true;
-	return false;
-}
-
-/**
- * memcg_kmem_charge: charge a kmem page
- * @page: page to charge
- * @gfp: reclaim mode
- * @order: allocation order
- *
- * Returns 0 on success, an error code on failure.
- */
-static __always_inline int memcg_kmem_charge(struct page *page,
-					     gfp_t gfp, int order)
-{
-	if (__memcg_kmem_bypass())
-		return 0;
-	if (!(gfp & __GFP_ACCOUNT))
-		return 0;
-	return __memcg_kmem_charge(page, gfp, order);
-}
-
-/**
- * memcg_kmem_uncharge: uncharge a kmem page
- * @page: page to uncharge
- * @order: allocation order
- */
-static __always_inline void memcg_kmem_uncharge(struct page *page, int order)
-{
-	if (memcg_kmem_enabled())
-		__memcg_kmem_uncharge(page, order);
-}
-
-/**
- * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
- * @cachep: the original global kmem cache
- *
- * All memory allocated from a per-memcg cache is charged to the owner memcg.
- */
-static __always_inline struct kmem_cache *
-memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
-{
-	if (__memcg_kmem_bypass())
-		return cachep;
-	return __memcg_kmem_get_cache(cachep, gfp);
-}
-
-static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
-{
-	if (memcg_kmem_enabled())
-		__memcg_kmem_put_cache(cachep);
-}
-
 /**
  * memcg_kmem_update_page_stat - update kmem page state statistics
  * @page: the page
@@ -878,15 +808,6 @@ static inline bool memcg_kmem_enabled(void)
 	return false;
 }
 
-static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
-{
-	return 0;
-}
-
-static inline void memcg_kmem_uncharge(struct page *page, int order)
-{
-}
-
 static inline int memcg_cache_id(struct mem_cgroup *memcg)
 {
 	return -1;
@@ -900,16 +821,6 @@ static inline void memcg_put_cache_ids(void)
 {
 }
 
-static inline struct kmem_cache *
-memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
-{
-	return cachep;
-}
-
-static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
-{
-}
-
 static inline void memcg_kmem_update_page_stat(struct page *page,
 					       enum mem_cgroup_stat_index idx, int val)
 {
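
Note: callers keep the same entry points after this change; the bypass and
__GFP_ACCOUNT checks that the old __always_inline wrappers performed are
presumably folded into the out-of-line definitions in mm/memcontrol.c (not
shown in this hunk). A minimal caller sketch, illustrative only and not part
of the patch; alloc_accounted_pages_sketch is a hypothetical helper:

static struct page *alloc_accounted_pages_sketch(gfp_t gfp, int order)
{
	/* __GFP_ACCOUNT opts this allocation into kmem accounting */
	struct page *page = alloc_pages(gfp | __GFP_ACCOUNT, order);

	/* charge the current memcg; the function itself now decides
	 * whether kmem accounting is active and applicable */
	if (page && memcg_kmem_charge(page, gfp | __GFP_ACCOUNT, order)) {
		/* charge failed: back out the allocation */
		__free_pages(page, order);
		return NULL;
	}
	return page;
}

On free, the matching call would be memcg_kmem_uncharge(page, order) before
__free_pages(); with the !CONFIG_MEMCG stubs deleted above, both calls rely
on the unconditional declarations added in the first hunk.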