@@ -3296,6 +3296,9 @@ static void free_block(struct kmem_cache *cachep, void **objpp,
 {
 	int i;
 	struct kmem_cache_node *n = get_node(cachep, node);
+	struct page *page;
+
+	n->free_objects += nr_objects;
 
 	for (i = 0; i < nr_objects; i++) {
 		void *objp;
@@ -3308,17 +3311,11 @@ static void free_block(struct kmem_cache *cachep, void **objpp,
 		check_spinlock_acquired_node(cachep, node);
 		slab_put_obj(cachep, page, objp);
 		STATS_DEC_ACTIVE(cachep);
-		n->free_objects++;
 
 		/* fixup slab chains */
-		if (page->active == 0) {
-			if (n->free_objects > n->free_limit) {
-				n->free_objects -= cachep->num;
-				list_add_tail(&page->lru, list);
-			} else {
-				list_add(&page->lru, &n->slabs_free);
-			}
-		} else {
+		if (page->active == 0)
+			list_add(&page->lru, &n->slabs_free);
+		else {
 			/* Unconditionally move a slab to the end of the
 			 * partial list on free - maximum time for the
 			 * other objects to be freed, too.
@@ -3326,6 +3323,14 @@ static void free_block(struct kmem_cache *cachep, void **objpp,
 			list_add_tail(&page->lru, &n->slabs_partial);
 		}
 	}
+
+	while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
+		n->free_objects -= cachep->num;
+
+		page = list_last_entry(&n->slabs_free, struct page, lru);
+		list_del(&page->lru);
+		list_add(&page->lru, list);
+	}
 }
 
 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
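
The net effect of the hunks above: free_block() now credits all nr_objects to n->free_objects up front, keeps every fully free slab on n->slabs_free during the per-object loop, and only afterwards trims slabs off the tail of n->slabs_free onto the caller-supplied list while the cached total exceeds free_limit. The userspace sketch below mirrors that trimming loop with simplified stand-in types; it is an illustration only, and every name in it (fake_slab, node_state, trim_free_slabs, ...) is made up for the sketch, not kernel API.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a slab page: only list linkage and an id. */
struct fake_slab {
	int id;
	struct fake_slab *prev, *next;
};

/* Doubly linked list with explicit head/tail, standing in for list_head. */
struct slab_list {
	struct fake_slab *head, *tail;
};

/* Per-node bookkeeping, standing in for struct kmem_cache_node. */
struct node_state {
	unsigned int free_objects;	/* objects cached in free/partial slabs */
	unsigned int free_limit;	/* keep at most this many cached */
	unsigned int objs_per_slab;	/* cachep->num in the real code */
	struct slab_list slabs_free;	/* fully free slabs, oldest at the tail */
};

static void list_push_front(struct slab_list *l, struct fake_slab *s)
{
	s->prev = NULL;
	s->next = l->head;
	if (l->head)
		l->head->prev = s;
	else
		l->tail = s;
	l->head = s;
}

static struct fake_slab *list_pop_back(struct slab_list *l)
{
	struct fake_slab *s = l->tail;

	if (!s)
		return NULL;
	l->tail = s->prev;
	if (l->tail)
		l->tail->next = NULL;
	else
		l->head = NULL;
	s->prev = s->next = NULL;
	return s;
}

/*
 * Same shape as the while loop added at the end of free_block(): pull the
 * oldest fully free slab off the tail of slabs_free while the cached object
 * count exceeds the limit, and park it on a caller-supplied discard list
 * (destroyed outside the node lock in the real code).
 */
static void trim_free_slabs(struct node_state *n, struct slab_list *discard)
{
	struct fake_slab *s;

	while (n->free_objects > n->free_limit && n->slabs_free.tail) {
		s = list_pop_back(&n->slabs_free);
		n->free_objects -= n->objs_per_slab;
		list_push_front(discard, s);
	}
}

int main(void)
{
	struct node_state n = { .free_limit = 64, .objs_per_slab = 32 };
	struct slab_list discard = { 0 };
	struct fake_slab slabs[4];
	struct fake_slab *s;
	int i;

	/* Pretend free_block() just left four fully free slabs behind. */
	for (i = 0; i < 4; i++) {
		slabs[i].id = i;
		list_push_front(&n.slabs_free, &slabs[i]);
		n.free_objects += n.objs_per_slab;
	}

	trim_free_slabs(&n, &discard);

	printf("free_objects left: %u\n", n.free_objects);	/* 64 */
	for (s = discard.head; s; s = s->next)
		printf("discarding slab %d\n", s->id);
	return 0;
}

The sketch preserves the design point of the patch: trimming is decided once, after all objects have been accounted, so a batch free can no longer interleave per-object accounting with slab eviction decisions.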