@@ -895,12 +895,6 @@ static int init_cache_node_node(int node)
 	return 0;
 }
 
-static inline int slabs_tofree(struct kmem_cache *cachep,
-				struct kmem_cache_node *n)
-{
-	return (n->free_objects + cachep->num - 1) / cachep->num;
-}
-
 static void cpuup_canceled(long cpu)
 {
 	struct kmem_cache *cachep;
@@ -965,7 +959,7 @@ free_slab:
 		n = get_node(cachep, node);
 		if (!n)
 			continue;
-		drain_freelist(cachep, n, slabs_tofree(cachep, n));
+		drain_freelist(cachep, n, INT_MAX);
 	}
 }
 
@@ -1117,7 +1111,7 @@ static int __meminit drain_cache_node_node(int node)
 		if (!n)
 			continue;
 
-		drain_freelist(cachep, n, slabs_tofree(cachep, n));
+		drain_freelist(cachep, n, INT_MAX);
 
 		if (!list_empty(&n->slabs_full) ||
 		    !list_empty(&n->slabs_partial)) {
@@ -2311,7 +2305,7 @@ int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
 
 	check_irq_on();
 	for_each_kmem_cache_node(cachep, node, n) {
-		drain_freelist(cachep, n, slabs_tofree(cachep, n));
+		drain_freelist(cachep, n, INT_MAX);
 
 		ret += !list_empty(&n->slabs_full) ||
 		       !list_empty(&n->slabs_partial);
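
Note: the removed slabs_tofree() was a ceiling division, (free_objects +
num - 1) / num, i.e. an upper bound on the number of completely free slabs
on the node, and drain_freelist() already stops on its own once the free
list is empty. Passing INT_MAX therefore just means "drain everything".
Below is a minimal standalone sketch of that equivalence (plain userspace
C with made-up numbers; drain() only mimics the shape of drain_freelist(),
it is not the kernel function):

#include <limits.h>
#include <stdio.h>

/* Ceiling division, as the removed slabs_tofree() computed it. */
static int slabs_tofree_estimate(int free_objects, int objects_per_slab)
{
	return (free_objects + objects_per_slab - 1) / objects_per_slab;
}

/*
 * Shape of drain_freelist(): free up to 'tofree' slabs, but stop as soon
 * as the free list runs out, so an oversized bound such as INT_MAX is
 * harmless and simply drains every free slab.
 */
static int drain(int free_slabs, int tofree)
{
	int nr_freed = 0;

	while (nr_freed < tofree && free_slabs > 0) {
		free_slabs--;	/* stand-in for unlinking and freeing a slab */
		nr_freed++;
	}
	return nr_freed;
}

int main(void)
{
	int free_slabs = 5;	/* hypothetical per-node free-slab count */
	int est = slabs_tofree_estimate(150, 32);	/* ceil(150/32) = 5 */

	printf("capped by estimate: %d\n", drain(free_slabs, est));	/* 5 */
	printf("capped by INT_MAX:  %d\n", drain(free_slabs, INT_MAX));	/* 5 */
	return 0;
}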