@@ -3894,7 +3894,7 @@ EXPORT_SYMBOL(kfree);
  * being allocated from last increasing the chance that the last objects
  * are freed in them.
  */
-int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
+int __kmem_cache_shrink(struct kmem_cache *s)
 {
 	int node;
 	int i;
@@ -3906,21 +3906,6 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
 	unsigned long flags;
 	int ret = 0;
 
-	if (deactivate) {
-		/*
-		 * Disable empty slabs caching. Used to avoid pinning offline
-		 * memory cgroups by kmem pages that can be freed.
-		 */
-		s->cpu_partial = 0;
-		s->min_partial = 0;
-
-		/*
-		 * s->cpu_partial is checked locklessly (see put_cpu_partial),
-		 * so we have to make sure the change is visible.
-		 */
-		synchronize_sched();
-	}
-
 	flush_all(s);
 	for_each_kmem_cache_node(s, node, n) {
 		INIT_LIST_HEAD(&discard);
@@ -3971,13 +3956,33 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
 	return ret;
 }
 
+#ifdef CONFIG_MEMCG
+void __kmemcg_cache_deactivate(struct kmem_cache *s)
+{
+	/*
+	 * Disable empty slabs caching. Used to avoid pinning offline
+	 * memory cgroups by kmem pages that can be freed.
+	 */
+	s->cpu_partial = 0;
+	s->min_partial = 0;
+
+	/*
+	 * s->cpu_partial is checked locklessly (see put_cpu_partial), so
+	 * we have to make sure the change is visible.
+	 */
+	synchronize_sched();
+
+	__kmem_cache_shrink(s);
+}
+#endif
+
 static int slab_mem_going_offline_callback(void *arg)
 {
 	struct kmem_cache *s;
 
 	mutex_lock(&slab_mutex);
 	list_for_each_entry(s, &slab_caches, list)
-		__kmem_cache_shrink(s, false);
+		__kmem_cache_shrink(s);
 	mutex_unlock(&slab_mutex);
 
 	return 0;
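
For orientation, a minimal sketch (not part of the patch) of how the two entry points are meant to be used after this split: callers that only want to release empty slabs, such as the memory-hotplug offline callback above, keep calling the plain __kmem_cache_shrink(), while the memcg offline path calls __kmemcg_cache_deactivate(). kmemcg_deactivate_caches() and is_memcg_cache() below are hypothetical names standing in for the real per-memcg iteration in mm/slab_common.c.

/*
 * Sketch only, not from the patch. Walks the cache list and, for each
 * cache belonging to the dying memcg, disables empty-slab caching and
 * shrinks it via the new __kmemcg_cache_deactivate() helper.
 */
static void kmemcg_deactivate_caches(struct mem_cgroup *memcg) /* hypothetical */
{
	struct kmem_cache *s;

	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_caches, list) {
		if (is_memcg_cache(s, memcg))	/* hypothetical predicate */
			__kmemcg_cache_deactivate(s);
	}
	mutex_unlock(&slab_mutex);
}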