@@ -791,13 +791,8 @@ static void start_cpu_timer(int cpu)
 	}
 }
 
-static struct array_cache *alloc_arraycache(int node, int entries,
-					    int batchcount, gfp_t gfp)
+static void init_arraycache(struct array_cache *ac, int limit, int batch)
 {
-	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
-	struct array_cache *nc = NULL;
-
-	nc = kmalloc_node(memsize, gfp, node);
 	/*
 	 * The array_cache structures contain pointers to free object.
 	 * However, when such objects are allocated or transferred to another
@@ -805,15 +800,25 @@ static struct array_cache *alloc_arraycache(int node, int entries,
 	 * valid references during a kmemleak scan. Therefore, kmemleak must
 	 * not scan such objects.
 	 */
-	kmemleak_no_scan(nc);
-	if (nc) {
-		nc->avail = 0;
-		nc->limit = entries;
-		nc->batchcount = batchcount;
-		nc->touched = 0;
-		spin_lock_init(&nc->lock);
+	kmemleak_no_scan(ac);
+	if (ac) {
+		ac->avail = 0;
+		ac->limit = limit;
+		ac->batchcount = batch;
+		ac->touched = 0;
+		spin_lock_init(&ac->lock);
 	}
-	return nc;
+}
+
+static struct array_cache *alloc_arraycache(int node, int entries,
+					    int batchcount, gfp_t gfp)
+{
+	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
+	struct array_cache *ac = NULL;
+
+	ac = kmalloc_node(memsize, gfp, node);
+	init_arraycache(ac, entries, batchcount);
+	return ac;
+}
 
 static inline bool is_slab_pfmemalloc(struct page *page)
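
The point of the hunks above is to split allocation from initialization: init_arraycache() now sets up an already-allocated array_cache, and alloc_arraycache() becomes a thin kmalloc_node() wrapper around it. Below is a minimal userspace sketch of that pattern, not the kernel code: plain malloc() stands in for kmalloc_node(), the spinlock and kmemleak annotation are omitted, and the embedded_cache wrapper and main() driver are hypothetical, for illustration only.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for the kernel's struct array_cache. */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	/* the kernel struct ends with a flexible void *entry[] array */
};

/* Initialize an already-allocated cache; tolerates NULL like the patch. */
static void init_arraycache(struct array_cache *ac, int limit, int batch)
{
	if (ac) {
		ac->avail = 0;
		ac->limit = limit;
		ac->batchcount = batch;
		ac->touched = 0;
	}
}

/* Allocate the header plus room for 'entries' object pointers, then
 * delegate initialization to the factored-out helper. */
static struct array_cache *alloc_arraycache(int entries, int batchcount)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *ac = malloc(memsize);

	init_arraycache(ac, entries, batchcount);
	return ac;
}

/* Hypothetical wrapper: a cache embedded in a larger structure needs no
 * second allocation, only the factored-out initializer. */
struct embedded_cache {
	int node;
	struct array_cache ac;
};

int main(void)
{
	struct array_cache *ac = alloc_arraycache(16, 8);
	struct embedded_cache ec = { .node = 0 };

	init_arraycache(&ec.ac, 4, 2);
	if (ac)
		printf("standalone: limit=%u batch=%u\n", ac->limit, ac->batchcount);
	printf("embedded:   limit=%u batch=%u\n", ec.ac.limit, ec.ac.batchcount);
	free(ac);
	return 0;
}

Keeping the NULL check inside init_arraycache() is what lets alloc_arraycache() stay a one-line wrapper; the presumable payoff of the split is that a caller which embeds an array_cache inside another structure can initialize it in place instead of allocating it separately.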