@@ -272,7 +272,6 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
 
 #define CFLGS_OFF_SLAB		(0x80000000UL)
 #define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
-#define OFF_SLAB_MIN_SIZE (max_t(size_t, PAGE_SIZE >> 5, KMALLOC_MIN_SIZE + 1))
 
 #define BATCHREFILL_LIMIT	16
 /*
@@ -1879,7 +1878,6 @@ static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
 static size_t calculate_slab_order(struct kmem_cache *cachep,
 				size_t size, unsigned long flags)
 {
-	unsigned long offslab_limit;
 	size_t left_over = 0;
 	int gfporder;
 
@@ -1896,16 +1894,24 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 			break;
 
 		if (flags & CFLGS_OFF_SLAB) {
+			struct kmem_cache *freelist_cache;
+			size_t freelist_size;
+
+			freelist_size = num * sizeof(freelist_idx_t);
+			freelist_cache = kmalloc_slab(freelist_size, 0u);
+			if (!freelist_cache)
+				continue;
+
 			/*
-			 * Max number of objs-per-slab for caches which
-			 * use off-slab slabs. Needed to avoid a possible
-			 * looping condition in cache_grow().
+			 * Needed to avoid possible looping condition
+			 * in cache_grow()
 			 */
-			offslab_limit = size;
-			offslab_limit /= sizeof(freelist_idx_t);
+			if (OFF_SLAB(freelist_cache))
+				continue;
 
-			if (num > offslab_limit)
-				break;
+			/* check if off slab has enough benefit */
+			if (freelist_cache->size > cachep->size / 2)
+				continue;
 		}
 
 		/* Found something acceptable - save it away */
@@ -2031,17 +2037,9 @@ static bool set_off_slab_cache(struct kmem_cache *cachep,
 	cachep->num = 0;
 
 	/*
-	 * Determine if the slab management is 'on' or 'off' slab.
-	 * (bootstrapping cannot cope with offslab caches so don't do
-	 * it too early on. Always use on-slab management when
-	 * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
+	 * Always use on-slab management when SLAB_NOLEAKTRACE
+	 * to avoid recursive calls into kmemleak.
 	 */
-	if (size < OFF_SLAB_MIN_SIZE)
-		return false;
-
-	if (slab_early_init)
-		return false;
-
 	if (flags & SLAB_NOLEAKTRACE)
 		return false;
 
@@ -2205,7 +2203,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	 * sized slab is initialized in current slab initialization sequence.
 	 */
 	if (debug_pagealloc_enabled() && (flags & SLAB_POISON) &&
-		!slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
 		size >= 256 && cachep->object_size > cache_line_size()) {
 		if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
 			size_t tmp_size = ALIGN(size, PAGE_SIZE);
@@ -2254,14 +2251,6 @@ done:
 	if (OFF_SLAB(cachep)) {
 		cachep->freelist_cache =
 			kmalloc_slab(cachep->freelist_size, 0u);
-		/*
-		 * This is a possibility for one of the kmalloc_{dma,}_caches.
-		 * But since we go off slab only for object size greater than
-		 * OFF_SLAB_MIN_SIZE, and kmalloc_{dma,}_caches get created
-		 * in ascending order,this should not happen at all.
-		 * But leave a BUG_ON for some lucky dude.
-		 */
-		BUG_ON(ZERO_OR_NULL_PTR(cachep->freelist_cache));
 	}
 
 	err = setup_cpu_cache(cachep, gfp);
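
For readers following the logic change: with OFF_SLAB_MIN_SIZE and the slab_early_init guard gone, calculate_slab_order() now decides per candidate gfporder whether an off-slab freelist is viable by probing the kmalloc cache that would back it. Below is a minimal userspace sketch of that decision, under simplified assumptions; kmalloc_slab_mock() and its power-of-two sizing are hypothetical stand-ins for the kernel's kmalloc_slab(), and only the three checks in off_slab_ok() mirror the patch.

/*
 * Standalone sketch (not kernel code) of the off-slab freelist decision
 * introduced above in calculate_slab_order(). Types are simplified
 * stand-ins; kmalloc_slab_mock() fakes the kernel's kmalloc_slab().
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef unsigned char freelist_idx_t;	/* byte-sized freelist index variant */

#define CFLGS_OFF_SLAB	(0x80000000UL)
#define OFF_SLAB(c)	((c)->flags & CFLGS_OFF_SLAB)

struct kmem_cache {
	unsigned long flags;
	size_t size;			/* object size, including padding */
};

/* Fake kmalloc_slab(): round up to the next power of two >= 8. */
static struct kmem_cache *kmalloc_slab_mock(size_t size, struct kmem_cache *out)
{
	size_t s = 8;

	if (size == 0)
		return NULL;
	while (s < size)
		s <<= 1;
	out->flags = 0;			/* assume the backing cache is on-slab */
	out->size = s;
	return out;
}

/*
 * The three checks the patch adds for a candidate slab of `num` objects:
 * a kmalloc cache must exist for the freelist, it must itself be on-slab
 * (avoids the looping condition in cache_grow()), and it must cost less
 * than half an object, or going off-slab has no benefit.
 */
static bool off_slab_ok(struct kmem_cache *cachep, unsigned int num)
{
	struct kmem_cache storage;
	struct kmem_cache *freelist_cache;
	size_t freelist_size = num * sizeof(freelist_idx_t);

	freelist_cache = kmalloc_slab_mock(freelist_size, &storage);
	if (!freelist_cache)
		return false;
	if (OFF_SLAB(freelist_cache))
		return false;
	if (freelist_cache->size > cachep->size / 2)
		return false;
	return true;
}

int main(void)
{
	struct kmem_cache big = { .flags = 0, .size = 512 };
	struct kmem_cache small = { .flags = 0, .size = 16 };

	/* 32 one-byte indexes cost 32 bytes: cheap next to a 512-byte
	 * object, far too expensive next to a 16-byte one. */
	printf("512-byte objects: %s\n", off_slab_ok(&big, 32) ? "off-slab ok" : "on-slab");
	printf(" 16-byte objects: %s\n", off_slab_ok(&small, 32) ? "off-slab ok" : "on-slab");
	return 0;
}

Note how the half-object benefit test scales with the object size, replacing the old static OFF_SLAB_MIN_SIZE (PAGE_SIZE >> 5) cutoff.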