@@ -157,6 +157,17 @@
 #define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
 #endif
 
+#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
+				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)
+
+#if FREELIST_BYTE_INDEX
+typedef unsigned char freelist_idx_t;
+#else
+typedef unsigned short freelist_idx_t;
+#endif
+
+#define SLAB_OBJ_MAX_NUM (1 << sizeof(freelist_idx_t) * BITS_PER_BYTE)
+
 /*
  * true if a page was allocated from pfmemalloc reserves for network-based
  * swap
@@ -277,8 +288,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
  * OTOH the cpuarrays can contain lots of objects,
  * which could lock up otherwise freeable slabs.
  */
-#define REAPTIMEOUT_CPUC	(2*HZ)
-#define REAPTIMEOUT_LIST3	(4*HZ)
+#define REAPTIMEOUT_AC		(2*HZ)
+#define REAPTIMEOUT_NODE	(4*HZ)
 
 #if STATS
 #define	STATS_INC_ACTIVE(x)	((x)->num_active++)
@@ -565,9 +576,31 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 	return cachep->array[smp_processor_id()];
 }
 
-static size_t slab_mgmt_size(size_t nr_objs, size_t align)
+static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
+				size_t idx_size, size_t align)
 {
-	return ALIGN(nr_objs * sizeof(unsigned int), align);
+	int nr_objs;
+	size_t freelist_size;
+
+	/*
+	 * Ignore padding for the initial guess. The padding
+	 * is at most @align-1 bytes, and @buffer_size is at
+	 * least @align. In the worst case, this result will
+	 * be one greater than the number of objects that fit
+	 * into the memory allocation when taking the padding
+	 * into account.
+	 */
+	nr_objs = slab_size / (buffer_size + idx_size);
+
+	/*
+	 * This calculated number will be either the right
+	 * amount, or one greater than what we want.
+	 */
+	freelist_size = slab_size - nr_objs * buffer_size;
+	if (freelist_size < ALIGN(nr_objs * idx_size, align))
+		nr_objs--;
+
+	return nr_objs;
 }
 
 /*
@@ -600,25 +633,9 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
 		nr_objs = slab_size / buffer_size;
 
 	} else {
-		/*
-		 * Ignore padding for the initial guess. The padding
-		 * is at most @align-1 bytes, and @buffer_size is at
-		 * least @align. In the worst case, this result will
-		 * be one greater than the number of objects that fit
-		 * into the memory allocation when taking the padding
-		 * into account.
-		 */
-		nr_objs = (slab_size) / (buffer_size + sizeof(unsigned int));
-
-		/*
-		 * This calculated number will be either the right
-		 * amount, or one greater than what we want.
-		 */
-		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
-		       > slab_size)
-			nr_objs--;
-
-		mgmt_size = slab_mgmt_size(nr_objs, align);
+		nr_objs = calculate_nr_objs(slab_size, buffer_size,
+					sizeof(freelist_idx_t), align);
+		mgmt_size = ALIGN(nr_objs * sizeof(freelist_idx_t), align);
 	}
 	*num = nr_objs;
 	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
@@ -1067,7 +1084,7 @@ static int init_cache_node_node(int node)
 
 	list_for_each_entry(cachep, &slab_caches, list) {
 		/*
-		 * Set up the size64 kmemlist for cpu before we can
+		 * Set up the kmem_cache_node for cpu before we can
 		 * begin anything. Make sure some other cpu on this
 		 * node has not already allocated this
 		 */
@@ -1076,12 +1093,12 @@ static int init_cache_node_node(int node)
 		if (!n)
 			return -ENOMEM;
 		kmem_cache_node_init(n);
-		n->next_reap = jiffies + REAPTIMEOUT_LIST3 +
-		    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+		n->next_reap = jiffies + REAPTIMEOUT_NODE +
+		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
 
 		/*
-		 * The l3s don't come and go as CPUs come and
-		 * go. slab_mutex is sufficient
+		 * The kmem_cache_nodes don't come and go as CPUs
+		 * come and go. slab_mutex is sufficient
 		 * protection here.
 		 */
 		cachep->node[node] = n;
@@ -1406,8 +1423,8 @@ static void __init set_up_node(struct kmem_cache *cachep, int index)
 	for_each_online_node(node) {
 		cachep->node[node] = &init_kmem_cache_node[index + node];
 		cachep->node[node]->next_reap = jiffies +
-		    REAPTIMEOUT_LIST3 +
-		    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+		    REAPTIMEOUT_NODE +
+		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
 	}
 }
 
@@ -2010,6 +2027,10 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 		if (!num)
 			continue;
 
+		/* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */
+		if (num > SLAB_OBJ_MAX_NUM)
+			break;
+
 		if (flags & CFLGS_OFF_SLAB) {
 			/*
 			 * Max number of objs-per-slab for caches which
@@ -2017,7 +2038,7 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 			 * looping condition in cache_grow().
 			 */
 			offslab_limit = size;
-			offslab_limit /= sizeof(unsigned int);
+			offslab_limit /= sizeof(freelist_idx_t);
 
 			if (num > offslab_limit)
 				break;
@@ -2103,8 +2124,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 		}
 	}
 	cachep->node[numa_mem_id()]->next_reap =
-			jiffies + REAPTIMEOUT_LIST3 +
-			((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+			jiffies + REAPTIMEOUT_NODE +
+			((unsigned long)cachep) % REAPTIMEOUT_NODE;
 
 	cpu_cache_get(cachep)->avail = 0;
 	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
@@ -2243,7 +2264,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	 * it too early on. Always use on-slab management when
 	 * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
 	 */
-	if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init &&
+	if ((size >= (PAGE_SIZE >> 5)) && !slab_early_init &&
 	    !(flags & SLAB_NOLEAKTRACE))
 		/*
 		 * Size is large, assume best to place the slab management obj
@@ -2252,6 +2273,12 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 		flags |= CFLGS_OFF_SLAB;
 
 	size = ALIGN(size, cachep->align);
+	/*
+	 * We should restrict the number of objects in a slab to implement
+	 * byte sized index. Refer comment on SLAB_OBJ_MIN_SIZE definition.
+	 */
+	if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
+		size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
 
 	left_over = calculate_slab_order(cachep, size, cachep->align, flags);
 
@@ -2259,7 +2286,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 		return -E2BIG;
 
 	freelist_size =
-		ALIGN(cachep->num * sizeof(unsigned int), cachep->align);
+		ALIGN(cachep->num * sizeof(freelist_idx_t), cachep->align);
 
 	/*
 	 * If the slab has been placed off-slab, and we have enough space then
@@ -2272,7 +2299,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 
 	if (flags & CFLGS_OFF_SLAB) {
 		/* really off slab. No need for manual alignment */
-		freelist_size = cachep->num * sizeof(unsigned int);
+		freelist_size = cachep->num * sizeof(freelist_idx_t);
 
 #ifdef CONFIG_PAGE_POISONING
 		/* If we're going to use the generic kernel_map_pages()
@@ -2300,10 +2327,10 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	if (flags & CFLGS_OFF_SLAB) {
 		cachep->freelist_cache = kmalloc_slab(freelist_size, 0u);
 		/*
-		 * This is a possibility for one of the malloc_sizes caches.
+		 * This is a possibility for one of the kmalloc_{dma,}_caches.
 		 * But since we go off slab only for object size greater than
-		 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
-		 * this should not happen at all.
+		 * PAGE_SIZE/8, and kmalloc_{dma,}_caches get created
+		 * in ascending order,this should not happen at all.
 		 * But leave a BUG_ON for some lucky dude.
 		 */
 		BUG_ON(ZERO_OR_NULL_PTR(cachep->freelist_cache));
@@ -2511,14 +2538,17 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep)
 
 /*
  * Get the memory for a slab management obj.
- * For a slab cache when the slab descriptor is off-slab, slab descriptors
- * always come from malloc_sizes caches. The slab descriptor cannot
- * come from the same cache which is getting created because,
- * when we are searching for an appropriate cache for these
- * descriptors in kmem_cache_create, we search through the malloc_sizes array.
- * If we are creating a malloc_sizes cache here it would not be visible to
- * kmem_find_general_cachep till the initialization is complete.
- * Hence we cannot have freelist_cache same as the original cache.
+ *
+ * For a slab cache when the slab descriptor is off-slab, the
+ * slab descriptor can't come from the same cache which is being created,
+ * Because if it is the case, that means we defer the creation of
+ * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point.
+ * And we eventually call down to __kmem_cache_create(), which
+ * in turn looks up in the kmalloc_{dma,}_caches for the disired-size one.
+ * This is a "chicken-and-egg" problem.
+ *
+ * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
+ * which are all initialized during kmem_cache_init().
  */
 static void *alloc_slabmgmt(struct kmem_cache *cachep,
 				struct page *page, int colour_off,
@@ -2542,9 +2572,15 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
 	return freelist;
 }
 
-static inline unsigned int *slab_freelist(struct page *page)
+static inline freelist_idx_t get_free_obj(struct page *page, unsigned char idx)
 {
-	return (unsigned int *)(page->freelist);
+	return ((freelist_idx_t *)page->freelist)[idx];
+}
+
+static inline void set_free_obj(struct page *page,
+					unsigned char idx, freelist_idx_t val)
+{
+	((freelist_idx_t *)(page->freelist))[idx] = val;
 }
 
 static void cache_init_objs(struct kmem_cache *cachep,
@@ -2589,7 +2625,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
 		if (cachep->ctor)
 			cachep->ctor(objp);
 #endif
-		slab_freelist(page)[i] = i;
+		set_free_obj(page, i, i);
 	}
 }
 
@@ -2608,7 +2644,7 @@ static void *slab_get_obj(struct kmem_cache *cachep, struct page *page,
 {
 	void *objp;
 
-	objp = index_to_obj(cachep, page, slab_freelist(page)[page->active]);
+	objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
 	page->active++;
 #if DEBUG
 	WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
@@ -2629,7 +2665,7 @@ static void slab_put_obj(struct kmem_cache *cachep, struct page *page,
 
 	/* Verify double free bug */
 	for (i = page->active; i < cachep->num; i++) {
-		if (slab_freelist(page)[i] == objnr) {
+		if (get_free_obj(page, i) == objnr) {
 			printk(KERN_ERR "slab: double free detected in cache "
 					"'%s', objp %p\n", cachep->name, objp);
 			BUG();
@@ -2637,7 +2673,7 @@ static void slab_put_obj(struct kmem_cache *cachep, struct page *page,
 	}
 #endif
 	page->active--;
-	slab_freelist(page)[page->active] = objnr;
+	set_free_obj(page, page->active, objnr);
 }
 
 /*
@@ -2886,9 +2922,9 @@ retry:
 		/* move slabp to correct slabp list: */
 		list_del(&page->lru);
 		if (page->active == cachep->num)
-			list_add(&page->list, &n->slabs_full);
+			list_add(&page->lru, &n->slabs_full);
 		else
-			list_add(&page->list, &n->slabs_partial);
+			list_add(&page->lru, &n->slabs_partial);
 	}
 
 must_grow:
@@ -3245,11 +3281,11 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags,
 				 flags);
 
-	if (likely(ptr))
+	if (likely(ptr)) {
 		kmemcheck_slab_alloc(cachep, flags, ptr, cachep->object_size);
-
-	if (unlikely((flags & __GFP_ZERO) && ptr))
-		memset(ptr, 0, cachep->object_size);
+		if (unlikely(flags & __GFP_ZERO))
+			memset(ptr, 0, cachep->object_size);
+	}
 
 	return ptr;
 }
@@ -3310,17 +3346,17 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
 				 flags);
 	prefetchw(objp);
 
-	if (likely(objp))
+	if (likely(objp)) {
 		kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size);
-
-	if (unlikely((flags & __GFP_ZERO) && objp))
-		memset(objp, 0, cachep->object_size);
+		if (unlikely(flags & __GFP_ZERO))
+			memset(objp, 0, cachep->object_size);
+	}
 
 	return objp;
 }
 
 /*
- * Caller needs to acquire correct kmem_list's list_lock
+ * Caller needs to acquire correct kmem_cache_node's list_lock
  */
 static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
 			int node)
@@ -3574,11 +3610,6 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	struct kmem_cache *cachep;
 	void *ret;
 
-	/* If you want to save a few bytes .text space: replace
-	 * __ with kmem_.
-	 * Then kmalloc uses the uninlined functions instead of the inline
-	 * functions.
-	 */
 	cachep = kmalloc_slab(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
@@ -3670,7 +3701,7 @@ EXPORT_SYMBOL(kfree);
 /*
  * This initializes kmem_cache_node or resizes various caches for all nodes.
  */
-static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
+static int alloc_kmem_cache_node(struct kmem_cache *cachep, gfp_t gfp)
 {
 	int node;
 	struct kmem_cache_node *n;
@@ -3726,8 +3757,8 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
 		}
 
 		kmem_cache_node_init(n);
-		n->next_reap = jiffies + REAPTIMEOUT_LIST3 +
-				((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+		n->next_reap = jiffies + REAPTIMEOUT_NODE +
+				((unsigned long)cachep) % REAPTIMEOUT_NODE;
 		n->shared = new_shared;
 		n->alien = new_alien;
 		n->free_limit = (1 + nr_cpus_node(node)) *
@@ -3813,7 +3844,7 @@ static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
 		kfree(ccold);
 	}
 	kfree(new);
-	return alloc_kmemlist(cachep, gfp);
+	return alloc_kmem_cache_node(cachep, gfp);
 }
 
 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
@@ -3982,7 +4013,7 @@ static void cache_reap(struct work_struct *w)
 		if (time_after(n->next_reap, jiffies))
 			goto next;
 
-		n->next_reap = jiffies + REAPTIMEOUT_LIST3;
+		n->next_reap = jiffies + REAPTIMEOUT_NODE;
 
 		drain_array(searchp, n, n->shared, 0, node);
 
@@ -4003,7 +4034,7 @@ next:
 		next_reap_node();
 out:
 	/* Set up the next iteration */
-	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
+	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
 }
 
 #ifdef CONFIG_SLABINFO
@@ -4210,7 +4241,7 @@ static void handle_slab(unsigned long *n, struct kmem_cache *c,
 
 		for (j = page->active; j < c->num; j++) {
 			/* Skip freed item */
-			if (slab_freelist(page)[j] == i) {
+			if (get_free_obj(page, j) == i) {
 				active = false;
 				break;
 			}