@@ -157,6 +157,17 @@
 #define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
 #endif
 
+#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
+				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)
+
+#if FREELIST_BYTE_INDEX
+typedef unsigned char freelist_idx_t;
+#else
+typedef unsigned short freelist_idx_t;
+#endif
+
+#define SLAB_OBJ_MAX_NUM (1 << sizeof(freelist_idx_t) * BITS_PER_BYTE)
+
 /*
  * true if a page was allocated from pfmemalloc reserves for network-based
  * swap
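This first hunk sizes the freelist index down to the smallest type that can still name every object in an order-0 slab. PAGE_SIZE >> BITS_PER_BYTE is PAGE_SIZE / 256, the smallest object size at which a single page holds no more than 256 objects, so a one-byte index suffices whenever SLAB_OBJ_MIN_SIZE is at least that large. The user-space sketch below replays the arithmetic; the values given to PAGE_SIZE, BITS_PER_BYTE and SLAB_OBJ_MIN_SIZE are assumptions standing in for the kernel's own definitions elsewhere in the tree.

    /* Minimal user-space sketch of the FREELIST_BYTE_INDEX arithmetic.
     * PAGE_SIZE, BITS_PER_BYTE and SLAB_OBJ_MIN_SIZE are assumed values
     * standing in for the kernel's definitions, not the real ones.
     */
    #include <stdio.h>

    #define PAGE_SIZE         4096  /* assumed: common 4K page size */
    #define BITS_PER_BYTE     8
    #define SLAB_OBJ_MIN_SIZE 16    /* assumed minimum object size */

    #define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
                                    <= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

    #if FREELIST_BYTE_INDEX
    typedef unsigned char freelist_idx_t;
    #else
    typedef unsigned short freelist_idx_t;
    #endif

    /* Multiplication binds tighter than the shift: 1 << (sizeof * 8) */
    #define SLAB_OBJ_MAX_NUM (1 << sizeof(freelist_idx_t) * BITS_PER_BYTE)

    int main(void)
    {
            /* 4096 >> 8 == 16, so a byte index covers an order-0 slab */
            printf("PAGE_SIZE >> BITS_PER_BYTE = %d\n",
                   PAGE_SIZE >> BITS_PER_BYTE);
            printf("index width: %zu byte(s), max objects per slab: %d\n",
                   sizeof(freelist_idx_t), SLAB_OBJ_MAX_NUM);
            return 0;
    }

With these assumed values the condition holds, freelist_idx_t is a single byte, and SLAB_OBJ_MAX_NUM expands to 256 rather than 65536; note the precedence in that macro, where the multiplication binds tighter than the shift.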
@@ -2016,6 +2027,10 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 		if (!num)
 			continue;
 
+		/* Can't handle more objects per slab than SLAB_OBJ_MAX_NUM */
+		if (num > SLAB_OBJ_MAX_NUM)
+			break;
+
 		if (flags & CFLGS_OFF_SLAB) {
 			/*
 			 * Max number of objs-per-slab for caches which
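The new bail-out in calculate_slab_order() is what keeps higher page orders honest: the candidate object count num grows with the slab's order, while freelist_idx_t was sized against an order-0 slab only. The sketch below shows where the clamp fires; num_objs() is a hypothetical stand-in for the kernel's object-count estimate that ignores freelist and management overhead, and the page size, object size and 256-entry limit are assumed values.

    /* Sketch of why the SLAB_OBJ_MAX_NUM clamp is needed. num_objs() is
     * a hypothetical stand-in for the kernel's estimate and ignores
     * management overhead; SLAB_OBJ_MAX_NUM of 256 assumes a byte index.
     */
    #include <stdio.h>

    #define PAGE_SIZE        4096   /* assumed page size */
    #define SLAB_OBJ_MAX_NUM 256    /* byte-sized freelist index */

    static unsigned int num_objs(unsigned int order, unsigned int obj_size)
    {
            return (PAGE_SIZE << order) / obj_size;
    }

    int main(void)
    {
            unsigned int order, obj_size = 16;  /* assumed object size */

            for (order = 0; order <= 3; order++) {
                    unsigned int num = num_objs(order, obj_size);

                    printf("order %u: %u objects%s\n", order, num,
                           num > SLAB_OBJ_MAX_NUM ?
                           "  <- break: not indexable" : "");
            }
            return 0;
    }

Without the break, an order-1 slab of 16-byte objects would already hold 512 objects under these assumptions, and a one-byte freelist_idx_t could not address half of them.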
@@ -2258,6 +2273,12 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 		flags |= CFLGS_OFF_SLAB;
 
 	size = ALIGN(size, cachep->align);
+	/*
+	 * Restrict the number of objects in a slab so the byte-sized
+	 * freelist index can address them all; see the comment on the
+	 * SLAB_OBJ_MIN_SIZE definition.
+	 */
+	if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
+		size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
 
 	left_over = calculate_slab_order(cachep, size, cachep->align, flags);
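The hunk in __kmem_cache_create() covers the opposite corner: a cache whose objects are smaller than SLAB_OBJ_MIN_SIZE would overflow even a full byte index on an order-0 slab, so the object size is padded up before the order calculation runs. A minimal sketch of that rounding, assuming a 16-byte SLAB_OBJ_MIN_SIZE and 8-byte alignment, with an ALIGN macro written to match the kernel's round-up-to-a-multiple behaviour for power-of-two alignments:

    /* Sketch of the size bump in __kmem_cache_create(). The values of
     * SLAB_OBJ_MIN_SIZE and the alignment are assumptions; ALIGN mirrors
     * round-up-to-multiple for power-of-two alignments.
     */
    #include <stdio.h>
    #include <stddef.h>

    #define SLAB_OBJ_MIN_SIZE 16    /* assumed: PAGE_SIZE / 256 for 4K pages */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

    int main(void)
    {
            size_t align = 8;       /* assumed cache alignment */
            size_t sizes[] = { 8, 12, 16, 24 };
            size_t i;

            for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                    size_t size = ALIGN(sizes[i], align);

                    /* mirrors: if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE) */
                    if (size < SLAB_OBJ_MIN_SIZE)
                            size = ALIGN(SLAB_OBJ_MIN_SIZE, align);
                    printf("requested %zu -> slab object size %zu\n",
                           sizes[i], size);
            }
            return 0;
    }

The cost is padding: under these assumptions an 8-byte cache stores 16-byte objects, the trade the byte-sized index makes in exchange for halving the freelist's per-object footprint.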