@@ -169,16 +169,6 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
  */
 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
 
-/*
- * Set of flags that will prevent slab merging
- */
-#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
-		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
-		SLAB_FAILSLAB)
-
-#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
-		SLAB_CACHE_DMA | SLAB_NOTRACK)
-
 #define OO_SHIFT	16
 #define OO_MASK		((1 << OO_SHIFT) - 1)
 #define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */
@@ -1176,7 +1166,7 @@ out:
 
 __setup("slub_debug", setup_slub_debug);
 
-static unsigned long kmem_cache_flags(unsigned long object_size,
+unsigned long kmem_cache_flags(unsigned long object_size,
 	unsigned long flags, const char *name,
 	void (*ctor)(void *))
 {
@@ -1208,7 +1198,7 @@ static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
 					struct page *page) {}
 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
 					struct page *page) {}
-static inline unsigned long kmem_cache_flags(unsigned long object_size,
+unsigned long kmem_cache_flags(unsigned long object_size,
 	unsigned long flags, const char *name,
 	void (*ctor)(void *))
 {
@@ -2718,12 +2708,6 @@ static int slub_min_order;
 static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
 static int slub_min_objects;
 
-/*
- * Merge control. If this is set then no merging of slab caches will occur.
- * (Could be removed. This was introduced to pacify the merge skeptics.)
- */
-static int slub_nomerge;
-
 /*
  * Calculate the order of allocation given an slab object size.
  *
@@ -3252,14 +3236,6 @@ static int __init setup_slub_min_objects(char *str)
 
 __setup("slub_min_objects=", setup_slub_min_objects);
 
-static int __init setup_slub_nomerge(char *str)
-{
-	slub_nomerge = 1;
-	return 1;
-}
-
-__setup("slub_nomerge", setup_slub_nomerge);
-
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
@@ -3637,69 +3613,6 @@ void __init kmem_cache_init_late(void)
 {
 }
 
-/*
- * Find a mergeable slab cache
- */
-static int slab_unmergeable(struct kmem_cache *s)
-{
-	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
-		return 1;
-
-	if (!is_root_cache(s))
-		return 1;
-
-	if (s->ctor)
-		return 1;
-
-	/*
-	 * We may have set a slab to be unmergeable during bootstrap.
-	 */
-	if (s->refcount < 0)
-		return 1;
-
-	return 0;
-}
-
-static struct kmem_cache *find_mergeable(size_t size, size_t align,
-		unsigned long flags, const char *name, void (*ctor)(void *))
-{
-	struct kmem_cache *s;
-
-	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
-		return NULL;
-
-	if (ctor)
-		return NULL;
-
-	size = ALIGN(size, sizeof(void *));
-	align = calculate_alignment(flags, align, size);
-	size = ALIGN(size, align);
-	flags = kmem_cache_flags(size, flags, name, NULL);
-
-	list_for_each_entry(s, &slab_caches, list) {
-		if (slab_unmergeable(s))
-			continue;
-
-		if (size > s->size)
-			continue;
-
-		if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
-			continue;
-		/*
-		 * Check if alignment is compatible.
-		 * Courtesy of Adrian Drzewiecki
-		 */
-		if ((s->size & ~(align - 1)) != s->size)
-			continue;
-
-		if (s->size - size >= sizeof(void *))
-			continue;
-
-		return s;
-	}
-	return NULL;
-}
-
 struct kmem_cache *
 __kmem_cache_alias(const char *name, size_t size, size_t align,
 		   unsigned long flags, void (*ctor)(void *))