@@ -281,6 +281,9 @@ int slab_unmergeable(struct kmem_cache *s)
 	if (s->ctor)
 		return 1;
 
+	if (s->usersize)
+		return 1;
+
 	/*
 	 * We may have set a slab to be unmergeable during bootstrap.
 	 */
@@ -366,12 +369,16 @@ unsigned long calculate_alignment(slab_flags_t flags,
 
 static struct kmem_cache *create_cache(const char *name,
 		size_t object_size, size_t size, size_t align,
-		slab_flags_t flags, void (*ctor)(void *),
+		slab_flags_t flags, size_t useroffset,
+		size_t usersize, void (*ctor)(void *),
 		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
 {
 	struct kmem_cache *s;
 	int err;
 
+	if (WARN_ON(useroffset + usersize > object_size))
+		useroffset = usersize = 0;
+
 	err = -ENOMEM;
 	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
 	if (!s)
@@ -382,6 +389,8 @@ static struct kmem_cache *create_cache(const char *name,
 	s->size = size;
 	s->align = align;
 	s->ctor = ctor;
+	s->useroffset = useroffset;
+	s->usersize = usersize;
 
 	err = init_memcg_params(s, memcg, root_cache);
 	if (err)
@@ -406,11 +415,13 @@ out_free_cache:
 }
 
 /*
- * kmem_cache_create - Create a cache.
+ * kmem_cache_create_usercopy - Create a cache.
  * @name: A string which is used in /proc/slabinfo to identify this cache.
  * @size: The size of objects to be created in this cache.
  * @align: The required alignment for the objects.
  * @flags: SLAB flags
+ * @useroffset: Usercopy region offset
+ * @usersize: Usercopy region size
  * @ctor: A constructor for the objects.
  *
  * Returns a ptr to the cache on success, NULL on failure.
@@ -430,8 +441,9 @@ out_free_cache:
  * as davem.
  */
 struct kmem_cache *
-kmem_cache_create(const char *name, size_t size, size_t align,
-		slab_flags_t flags, void (*ctor)(void *))
+kmem_cache_create_usercopy(const char *name, size_t size, size_t align,
+		slab_flags_t flags, size_t useroffset, size_t usersize,
+		void (*ctor)(void *))
 {
 	struct kmem_cache *s = NULL;
 	const char *cache_name;
@@ -462,7 +474,13 @@ kmem_cache_create(const char *name, size_t size, size_t align,
 	 */
 	flags &= CACHE_CREATE_MASK;
 
-	s = __kmem_cache_alias(name, size, align, flags, ctor);
+	/* Fail closed on bad usersize or useroffset values. */
+	if (WARN_ON(!usersize && useroffset) ||
+	    WARN_ON(size < usersize || size - usersize < useroffset))
+		usersize = useroffset = 0;
+
+	if (!usersize)
+		s = __kmem_cache_alias(name, size, align, flags, ctor);
 	if (s)
 		goto out_unlock;
 
@@ -474,7 +492,7 @@ kmem_cache_create(const char *name, size_t size, size_t align,
 
 	s = create_cache(cache_name, size, size,
 			 calculate_alignment(flags, align, size),
-			 flags, ctor, NULL, NULL);
+			 flags, useroffset, usersize, ctor, NULL, NULL);
 	if (IS_ERR(s)) {
 		err = PTR_ERR(s);
 		kfree_const(cache_name);
@@ -500,6 +518,15 @@ out_unlock:
 	}
 	return s;
 }
+EXPORT_SYMBOL(kmem_cache_create_usercopy);
+
+struct kmem_cache *
+kmem_cache_create(const char *name, size_t size, size_t align,
+		slab_flags_t flags, void (*ctor)(void *))
+{
+	return kmem_cache_create_usercopy(name, size, align, flags, 0, size,
+					  ctor);
+}
 EXPORT_SYMBOL(kmem_cache_create);
 
 static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
@@ -612,6 +639,7 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
 	s = create_cache(cache_name, root_cache->object_size,
 			 root_cache->size, root_cache->align,
 			 root_cache->flags & CACHE_CREATE_MASK,
+			 root_cache->useroffset, root_cache->usersize,
 			 root_cache->ctor, memcg, root_cache);
 	/*
 	 * If we could not create a memcg cache, do not complain, because
@@ -879,13 +907,15 @@ bool slab_is_available(void)
 
 #ifndef CONFIG_SLOB
 /* Create a cache during boot when no slab services are available yet */
 void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
-		slab_flags_t flags)
+		slab_flags_t flags, size_t useroffset, size_t usersize)
 {
 	int err;
 
 	s->name = name;
 	s->size = s->object_size = size;
 	s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
+	s->useroffset = useroffset;
+	s->usersize = usersize;
 
 	slab_init_memcg_params(s);
@@ -906,7 +936,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
 	if (!s)
 		panic("Out of memory when creating slab %s\n", name);
 
-	create_boot_cache(s, name, size, flags);
+	create_boot_cache(s, name, size, flags, 0, size);
 	list_add(&s->list, &slab_caches);
 	memcg_link_cache(s);
 	s->refcount = 1;