@@ -1881,7 +1881,7 @@ redo:
 
 	new.frozen = 0;
 
-	if (!new.inuse && n->nr_partial > s->min_partial)
+	if (!new.inuse && n->nr_partial >= s->min_partial)
 		m = M_FREE;
 	else if (new.freelist) {
 		m = M_PARTIAL;
@@ -1992,7 +1992,7 @@ static void unfreeze_partials(struct kmem_cache *s,
 				new.freelist, new.counters,
 				"unfreezing slab"));
 
-		if (unlikely(!new.inuse && n->nr_partial > s->min_partial)) {
+		if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
 			page->next = discard_page;
 			discard_page = page;
 		} else {
@@ -2620,7 +2620,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		return;
 	}
 
-	if (unlikely(!new.inuse && n->nr_partial > s->min_partial))
+	if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
 		goto slab_empty;
 
 	/*
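
All three hunks make the same one-character change: an empty slab is now handed back to the page allocator once the node already holds at least s->min_partial partial slabs, where the old strict > let the node retain one slab beyond that floor. Below is a minimal standalone sketch of the decision at the boundary; the types and helpers (struct node, struct cache, should_free_old/new) are simplified stand-ins for illustration, not the kernel's own definitions:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures involved. */
struct node  { unsigned long nr_partial; };   /* partial slabs on this node */
struct cache { unsigned long min_partial; };  /* floor of partial slabs to keep */

/* Old policy: free an empty slab only when strictly above the floor. */
static bool should_free_old(const struct node *n, const struct cache *s,
			    unsigned int inuse)
{
	return !inuse && n->nr_partial > s->min_partial;
}

/* New policy: free an empty slab once the floor itself is already met. */
static bool should_free_new(const struct node *n, const struct cache *s,
			    unsigned int inuse)
{
	return !inuse && n->nr_partial >= s->min_partial;
}

int main(void)
{
	struct cache s = { .min_partial = 5 };
	struct node  n = { .nr_partial = 5 };	/* exactly at the floor */

	/* Empty slab (inuse == 0) at the boundary. */
	printf("old: %d, new: %d\n",
	       should_free_old(&n, &s, 0),	/* 0: kept on the partial list */
	       should_free_new(&n, &s, 0));	/* 1: freed back to the page allocator */
	return 0;
}

With min_partial = 5 and nr_partial = 5 this prints "old: 0, new: 1": the slab being decided on is not yet counted in nr_partial, so the old comparison could leave min_partial + 1 partial slabs on the node, while the new one caps retention at min_partial.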