@@ -2635,7 +2635,6 @@ static void memcg_register_cache(struct mem_cgroup *memcg,
 	if (!cachep)
 		return;
 
-	css_get(&memcg->css);
 	list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches);
 
 	/*
@@ -2669,9 +2668,6 @@ static void memcg_unregister_cache(struct kmem_cache *cachep)
 	list_del(&cachep->memcg_params->list);
 
 	kmem_cache_destroy(cachep);
-
-	/* drop the reference taken in memcg_register_cache */
-	css_put(&memcg->css);
 }
 
 int __memcg_cleanup_cache_params(struct kmem_cache *s)
@@ -2705,9 +2701,7 @@ static void memcg_unregister_all_caches(struct mem_cgroup *memcg)
 	mutex_lock(&memcg_slab_mutex);
 	list_for_each_entry_safe(params, tmp, &memcg->memcg_slab_caches, list) {
 		cachep = memcg_params_to_cache(params);
-		kmem_cache_shrink(cachep);
-		if (atomic_read(&cachep->memcg_params->nr_pages) == 0)
-			memcg_unregister_cache(cachep);
+		memcg_unregister_cache(cachep);
 	}
 	mutex_unlock(&memcg_slab_mutex);
 }
@@ -2742,10 +2736,10 @@ static void __memcg_schedule_register_cache(struct mem_cgroup *memcg,
 	struct memcg_register_cache_work *cw;
 
 	cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
-	if (cw == NULL) {
-		css_put(&memcg->css);
+	if (!cw)
 		return;
-	}
+
+	css_get(&memcg->css);
 
 	cw->memcg = memcg;
 	cw->cachep = cachep;
@@ -2776,12 +2770,8 @@ static void memcg_schedule_register_cache(struct mem_cgroup *memcg,
 int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order)
 {
 	unsigned int nr_pages = 1 << order;
-	int res;
-
-	res = memcg_charge_kmem(cachep->memcg_params->memcg, gfp, nr_pages);
-	if (!res)
-		atomic_add(nr_pages, &cachep->memcg_params->nr_pages);
-	return res;
+	return memcg_charge_kmem(cachep->memcg_params->memcg, gfp, nr_pages);
 }
 
 void __memcg_uncharge_slab(struct kmem_cache *cachep, int order)
@@ -2789,7 +2779,6 @@ void __memcg_uncharge_slab(struct kmem_cache *cachep, int order)
 	unsigned int nr_pages = 1 << order;
 
 	memcg_uncharge_kmem(cachep->memcg_params->memcg, nr_pages);
-	atomic_sub(nr_pages, &cachep->memcg_params->nr_pages);
 }
 
 /*
@@ -2816,22 +2805,13 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
 	if (current->memcg_kmem_skip_account)
 		return cachep;
 
-	rcu_read_lock();
-	memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner));
-
+	memcg = get_mem_cgroup_from_mm(current->mm);
 	if (!memcg_kmem_is_active(memcg))
 		goto out;
 
 	memcg_cachep = cache_from_memcg_idx(cachep, memcg_cache_id(memcg));
-	if (likely(memcg_cachep)) {
-		cachep = memcg_cachep;
-		goto out;
-	}
-
-	/* The corresponding put will be done in the workqueue. */
-	if (!css_tryget_online(&memcg->css))
-		goto out;
-	rcu_read_unlock();
+	if (likely(memcg_cachep))
+		return memcg_cachep;
 
 	/*
 	 * If we are in a safe context (can wait, and not in interrupt
@@ -2846,12 +2826,17 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
 	 * defer everything.
 	 */
 	memcg_schedule_register_cache(memcg, cachep);
-	return cachep;
 out:
-	rcu_read_unlock();
+	css_put(&memcg->css);
 	return cachep;
 }
 
+void __memcg_kmem_put_cache(struct kmem_cache *cachep)
+{
+	if (!is_root_cache(cachep))
+		css_put(&cachep->memcg_params->memcg->css);
+}
+
 /*
  * We need to verify if the allocation against current->mm->owner's memcg is
  * possible for the given order. But the page is not allocated yet, so we'll
@@ -2914,10 +2899,6 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order)
 	memcg_uncharge_kmem(memcg, 1 << order);
 	page->mem_cgroup = NULL;
 }
-#else
-static inline void memcg_unregister_all_caches(struct mem_cgroup *memcg)
-{
-}
 #endif /* CONFIG_MEMCG_KMEM */
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -4188,6 +4169,7 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 
 static void memcg_destroy_kmem(struct mem_cgroup *memcg)
 {
+	memcg_unregister_all_caches(memcg);
 	mem_cgroup_sockets_destroy(memcg);
 }
 #else
@@ -4797,7 +4779,6 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
 	}
 	spin_unlock(&memcg->event_list_lock);
 
-	memcg_unregister_all_caches(memcg);
 	vmpressure_cleanup(&memcg->vmpressure);
 }
 
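Taken together, the __memcg_kmem_get_cache() hunks above make the function return with a reference to the owning memcg's css held whenever a per-memcg cache is handed back, and the new __memcg_kmem_put_cache() is what drops that reference once the allocation has finished. A minimal caller-side sketch of the intended pairing, assuming the usual memcg_kmem_get_cache()/memcg_kmem_put_cache() wrappers around the functions changed here; actual_slab_alloc() is a placeholder, not part of this patch:

/*
 * Sketch only: an allocation path pairing the new get/put.
 * memcg_kmem_get_cache() may substitute a per-memcg clone of the
 * cache with its memcg's css pinned, so the clone cannot be
 * destroyed while the allocation is in flight;
 * memcg_kmem_put_cache() drops the pin (a no-op for root caches).
 */
static void *sketch_slab_alloc(struct kmem_cache *cachep, gfp_t gfp)
{
	void *obj;

	cachep = memcg_kmem_get_cache(cachep, gfp);
	obj = actual_slab_alloc(cachep, gfp);	/* placeholder */
	memcg_kmem_put_cache(cachep);
	return obj;
}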