
mm/sl[aou]b: Do slab aliasing call from common code

The slab aliasing logic causes some strange contortions in slub. So add
a call to deal with aliases to slab_common.c but disable it for other
slab allocators by providing stubs that fail to create aliases.

Full general support for aliases will require additional cleanup passes
and more standardization of fields in kmem_cache.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Christoph Lameter, 13 years ago
commit cbb79694d5
3 changed files, 25 insertions and 4 deletions
  1. mm/slab.h (+10 -0)
  2. mm/slab_common.c (+4 -0)
  3. mm/slub.c (+11 -4)
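
In rough outline, the flow this patch creates in common code: kmem_cache_create() first asks the allocator for an alias and only creates a new cache on a miss. Below is a minimal user-space sketch of that dispatch (not kernel code: the names mirror the kernel's, the bodies are toy stand-ins, and the merge test of "same object size" is a deliberate simplification of SLUB's find_mergeable() checks):

/*
 * Toy model of the alias-then-create dispatch; compile with
 * -DCONFIG_SLUB to get merging, without it to get the stub.
 */
#include <stddef.h>
#include <stdio.h>

struct kmem_cache { char name[32]; size_t size; int refcount; };

static struct kmem_cache caches[16];
static int nr_caches;

#ifdef CONFIG_SLUB
/* Toy merge check: reuse any existing cache with the same object size. */
static struct kmem_cache *__kmem_cache_alias(const char *name, size_t size)
{
	for (int i = 0; i < nr_caches; i++)
		if (caches[i].size == size) {
			caches[i].refcount++;
			return &caches[i];
		}
	return NULL;
}
#else
/* Stub for allocators without aliasing support: never merge. */
static inline struct kmem_cache *__kmem_cache_alias(const char *name,
						    size_t size)
{
	return NULL;
}
#endif

static struct kmem_cache *__kmem_cache_create(const char *name, size_t size)
{
	struct kmem_cache *s = &caches[nr_caches++];

	snprintf(s->name, sizeof(s->name), "%s", name);
	s->size = size;
	s->refcount = 1;
	return s;
}

/* Common code: try to alias first, create a new cache only on a miss. */
static struct kmem_cache *kmem_cache_create(const char *name, size_t size)
{
	struct kmem_cache *s = __kmem_cache_alias(name, size);

	return s ? s : __kmem_cache_create(name, size);
}

int main(void)
{
	struct kmem_cache *a = kmem_cache_create("foo", 64);
	struct kmem_cache *b = kmem_cache_create("bar", 64);

	printf("merged: %s\n", a == b ? "yes" : "no");
	return 0;
}

Built with -DCONFIG_SLUB the two creates merge; without it the stub makes every lookup miss, which is the behavior this patch gives SLAB and SLOB.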

mm/slab.h (+10 -0)

@@ -36,6 +36,16 @@ extern struct kmem_cache *kmem_cache;
 struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 	size_t align, unsigned long flags, void (*ctor)(void *));
 
+#ifdef CONFIG_SLUB
+struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
+	size_t align, unsigned long flags, void (*ctor)(void *));
+#else
+static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
+	size_t align, unsigned long flags, void (*ctor)(void *))
+{ return NULL; }
+#endif
+
+
 int __kmem_cache_shutdown(struct kmem_cache *);
 
 #endif

mm/slab_common.c (+4 -0)

@@ -115,6 +115,10 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 		goto out_locked;
 	}
 
+	s = __kmem_cache_alias(name, size, align, flags, ctor);
+	if (s)
+		goto out_locked;
+
 	s = __kmem_cache_create(n, size, align, flags, ctor);
 
 	if (s) {
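
For callers of kmem_cache_create() nothing changes at the API level, but under SLUB two compatible requests may now come back as the same struct kmem_cache. A hedged kernel-module sketch of what that looks like (the names foo_cache/bar_cache and the size 96 are invented for illustration; an actual merge also requires compatible flags and alignment, no constructor, and no conflicting debug options):

#include <linux/module.h>
#include <linux/slab.h>

static struct kmem_cache *foo_cache, *bar_cache;

static int __init alias_demo_init(void)
{
	/* Two caches with identical geometry and no ctor ... */
	foo_cache = kmem_cache_create("foo_cache", 96, 0, 0, NULL);
	bar_cache = kmem_cache_create("bar_cache", 96, 0, 0, NULL);
	if (!foo_cache || !bar_cache) {
		if (bar_cache)
			kmem_cache_destroy(bar_cache);
		if (foo_cache)
			kmem_cache_destroy(foo_cache);
		return -ENOMEM;
	}

	/* ... may turn out to be the very same object under SLUB. */
	pr_info("alias_demo: merged=%d\n", foo_cache == bar_cache);
	return 0;
}

static void __exit alias_demo_exit(void)
{
	kmem_cache_destroy(bar_cache);
	kmem_cache_destroy(foo_cache);
}

module_init(alias_demo_init);
module_exit(alias_demo_exit);
MODULE_LICENSE("GPL");

Destroying the caches in either order is safe because aliases are refcounted: the shared cache is only torn down when its last user calls kmem_cache_destroy().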

mm/slub.c (+11 -4)

@@ -3708,7 +3708,7 @@ void __init kmem_cache_init(void)
 		slub_max_order = 0;
 
 	kmem_size = offsetof(struct kmem_cache, node) +
-				nr_node_ids * sizeof(struct kmem_cache_node *);
+			nr_node_ids * sizeof(struct kmem_cache_node *);
 
 	/* Allocate two kmem_caches from the page allocator */
 	kmalloc_size = ALIGN(kmem_size, cache_line_size());
@@ -3922,7 +3922,7 @@ static struct kmem_cache *find_mergeable(size_t size,
 	return NULL;
 }
 
-struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
+struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
 		size_t align, unsigned long flags, void (*ctor)(void *))
 {
 	struct kmem_cache *s;
@@ -3939,11 +3939,18 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 
 		if (sysfs_slab_alias(s, name)) {
 			s->refcount--;
-			return NULL;
+			s = NULL;
 		}
-		return s;
 	}
 
+	return s;
+}
+
+struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
+		size_t align, unsigned long flags, void (*ctor)(void *))
+{
+	struct kmem_cache *s;
+
 	s = kmem_cache_alloc(kmem_cache, GFP_KERNEL);
 	if (s) {
 		if (kmem_cache_open(s, name,
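
One behavioral nuance in the restructured code: __kmem_cache_alias() takes a reference on a merge hit before publishing the alias name via sysfs_slab_alias(), and on sysfs failure it now drops that reference and returns NULL, so the common code falls back to creating a fresh cache rather than failing the whole kmem_cache_create() call as the old inline version did. A toy user-space model of that bookkeeping (one hard-coded mergeable cache, no locking; publish_alias() is a stand-in for sysfs_slab_alias() that is forced to fail for one name so the rollback path runs):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct cache { const char *name; size_t size; int refcount; };

static struct cache demo = { "demo", 64, 1 };

/* Stand-in for sysfs_slab_alias(): made to fail for one name. */
static int publish_alias(struct cache *s, const char *name)
{
	return strcmp(name, "unlucky") == 0 ? -1 : 0;
}

static struct cache *cache_alias(const char *name, size_t size)
{
	struct cache *s = (size == demo.size) ? &demo : NULL;

	if (s) {
		s->refcount++;		/* another name now uses this cache */
		if (publish_alias(s, name)) {
			s->refcount--;	/* roll back on failure ...         */
			s = NULL;	/* ... and report a miss, so the    */
		}			/* caller creates a fresh cache     */
	}
	return s;
}

int main(void)
{
	printf("lucky:    %s\n", cache_alias("lucky", 64) ? "aliased" : "miss");
	printf("unlucky:  %s\n", cache_alias("unlucky", 64) ? "aliased" : "miss");
	printf("refcount: %d\n", demo.refcount);
	return 0;
}

Running it, "lucky" aliases the demo cache and bumps its refcount to 2; "unlucky" takes the rollback path, reports a miss, and leaves the refcount at 2.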