@@ -3277,60 +3277,11 @@ static void kmem_cache_destroy_work_func(struct work_struct *w)
 
 	cachep = memcg_params_to_cache(p);
 
-	/*
-	 * If we get down to 0 after shrink, we could delete right away.
-	 * However, memcg_release_pages() already puts us back in the workqueue
-	 * in that case. If we proceed deleting, we'll get a dangling
-	 * reference, and removing the object from the workqueue in that case
-	 * is unnecessary complication. We are not a fast path.
-	 *
-	 * Note that this case is fundamentally different from racing with
-	 * shrink_slab(): if memcg_cgroup_destroy_cache() is called in
-	 * kmem_cache_shrink, not only we would be reinserting a dead cache
-	 * into the queue, but doing so from inside the worker racing to
-	 * destroy it.
-	 *
-	 * So if we aren't down to zero, we'll just schedule a worker and try
-	 * again
-	 */
-	if (atomic_read(&cachep->memcg_params->nr_pages) != 0)
-		kmem_cache_shrink(cachep);
-	else
+	kmem_cache_shrink(cachep);
+	if (atomic_read(&cachep->memcg_params->nr_pages) == 0)
 		kmem_cache_destroy(cachep);
 }
 
-void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
-{
-	if (!cachep->memcg_params->dead)
-		return;
-
-	/*
-	 * There are many ways in which we can get here.
-	 *
-	 * We can get to a memory-pressure situation while the delayed work is
-	 * still pending to run. The vmscan shrinkers can then release all
-	 * cache memory and get us to destruction. If this is the case, we'll
-	 * be executed twice, which is a bug (the second time will execute over
-	 * bogus data). In this case, cancelling the work should be fine.
-	 *
-	 * But we can also get here from the worker itself, if
-	 * kmem_cache_shrink is enough to shake all the remaining objects and
-	 * get the page count to 0. In this case, we'll deadlock if we try to
-	 * cancel the work (the worker runs with an internal lock held, which
-	 * is the same lock we would hold for cancel_work_sync().)
-	 *
-	 * Since we can't possibly know who got us here, just refrain from
-	 * running if there is already work pending
-	 */
-	if (work_pending(&cachep->memcg_params->destroy))
-		return;
-	/*
-	 * We have to defer the actual destroying to a workqueue, because
-	 * we might currently be in a context that cannot sleep.
-	 */
-	schedule_work(&cachep->memcg_params->destroy);
-}
-
 int __kmem_cache_destroy_memcg_children(struct kmem_cache *s)
 {
 	struct kmem_cache *c;
@@ -3356,16 +3307,7 @@ int __kmem_cache_destroy_memcg_children(struct kmem_cache *s)
 		 * We will now manually delete the caches, so to avoid races
 		 * we need to cancel all pending destruction workers and
 		 * proceed with destruction ourselves.
-		 *
-		 * kmem_cache_destroy() will call kmem_cache_shrink internally,
-		 * and that could spawn the workers again: it is likely that
-		 * the cache still have active pages until this very moment.
-		 * This would lead us back to mem_cgroup_destroy_cache.
-		 *
-		 * But that will not execute at all if the "dead" flag is not
-		 * set, so flip it down to guarantee we are in control.
 		 */
-		c->memcg_params->dead = false;
 		cancel_work_sync(&c->memcg_params->destroy);
 		kmem_cache_destroy(c);
 
@@ -3387,7 +3329,6 @@ static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
 	mutex_lock(&memcg->slab_caches_mutex);
 	list_for_each_entry(params, &memcg->memcg_slab_caches, list) {
 		cachep = memcg_params_to_cache(params);
-		cachep->memcg_params->dead = true;
 		schedule_work(&cachep->memcg_params->destroy);
 	}
 	mutex_unlock(&memcg->slab_caches_mutex);
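
For reference, a minimal sketch of what the destruction worker reads like once the hunks above are applied. The prologue (the container_of() that recovers memcg_cache_params from the work item, and the local declarations) is assumed from the surrounding code, which this diff does not show:

	static void kmem_cache_destroy_work_func(struct work_struct *w)
	{
		struct kmem_cache *cachep;
		struct memcg_cache_params *p;

		/* assumed prologue: recover the cache from the work item */
		p = container_of(w, struct memcg_cache_params, destroy);
		cachep = memcg_params_to_cache(p);

		/* always shrink first; destroy only once no pages remain */
		kmem_cache_shrink(cachep);
		if (atomic_read(&cachep->memcg_params->nr_pages) == 0)
			kmem_cache_destroy(cachep);
	}

The worker now unconditionally shrinks the cache and destroys it only if that brought nr_pages down to zero, so the "dead" flag and the work_pending() re-entry check removed above are no longer needed.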