@@ -270,7 +270,9 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
 	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
 	} while (0)
 
+#define CFLGS_OBJFREELIST_SLAB	(0x40000000UL)
 #define CFLGS_OFF_SLAB		(0x80000000UL)
+#define OBJFREELIST_SLAB(x)	((x)->flags & CFLGS_OBJFREELIST_SLAB)
 #define OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
 
 #define BATCHREFILL_LIMIT	16
@@ -480,7 +482,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
 	 * the slabs are all pages aligned, the objects will be at the
 	 * correct alignment when allocated.
 	 */
-	if (flags & CFLGS_OFF_SLAB) {
+	if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
 		*num = slab_size / buffer_size;
 		*left_over = slab_size % buffer_size;
 	} else {
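With CFLGS_OBJFREELIST_SLAB set, the freelist index array costs no extra room in the slab page during sizing, because it will later live inside a free object, so cache_estimate() treats it like the off-slab case and simply divides the slab by the object size. A minimal user-space sketch of the two sizing rules, ignoring the alignment handling done by the real code; the helper name estimate_objs and the fixed page size are illustrative, not kernel code:

#include <stdio.h>
#include <stddef.h>

typedef unsigned char freelist_idx_t;	/* small index type, as SLAB uses for small slabs */

/*
 * Hypothetical helper: estimate how many objects fit in one slab page
 * when the freelist reuses a free object (or lives off-slab) versus the
 * classic layout where each object also needs one on-slab freelist index.
 */
static size_t estimate_objs(size_t slab_size, size_t obj_size, int freelist_inside_obj)
{
	if (freelist_inside_obj)
		return slab_size / obj_size;	/* freelist costs nothing extra */

	return slab_size / (obj_size + sizeof(freelist_idx_t));
}

int main(void)
{
	size_t page = 4096, obj = 256;

	printf("objfreelist/off-slab: %zu objects\n", estimate_objs(page, obj, 1));
	printf("on-slab freelist:     %zu objects\n", estimate_objs(page, obj, 0));
	return 0;
}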
@@ -1801,6 +1803,12 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep,
 						struct page *page)
 {
 	int i;
+
+	if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) {
+		poison_obj(cachep, page->freelist - obj_offset(cachep),
+			POISON_FREE);
+	}
+
 	for (i = 0; i < cachep->num; i++) {
 		void *objp = index_to_obj(cachep, page, i);
 
@@ -2029,6 +2037,29 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
 	return cachep;
 }
 
+static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
+			size_t size, unsigned long flags)
+{
+	size_t left;
+
+	cachep->num = 0;
+
+	if (cachep->ctor || flags & SLAB_DESTROY_BY_RCU)
+		return false;
+
+	left = calculate_slab_order(cachep, size,
+			flags | CFLGS_OBJFREELIST_SLAB);
+	if (!cachep->num)
+		return false;
+
+	if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size)
+		return false;
+
+	cachep->colour = left / cachep->colour_off;
+
+	return true;
+}
+
 static bool set_off_slab_cache(struct kmem_cache *cachep,
 			size_t size, unsigned long flags)
 {
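set_objfreelist_slab_cache() only accepts caches whose free objects may be scribbled on: a constructor or SLAB_DESTROY_BY_RCU rules the mode out, and the whole index array (cachep->num entries of freelist_idx_t) must fit inside a single object. A stand-alone sketch of that feasibility test; the toy_cache struct and helper name are hypothetical, for illustration only:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef unsigned char freelist_idx_t;

/* Hypothetical stand-in for the few kmem_cache fields the check needs. */
struct toy_cache {
	size_t object_size;	/* usable object size */
	unsigned int num;	/* objects per slab */
	bool has_ctor;		/* constructor registered? */
	bool destroy_by_rcu;	/* SLAB_DESTROY_BY_RCU semantics? */
};

static bool can_use_objfreelist(const struct toy_cache *c)
{
	if (c->has_ctor || c->destroy_by_rcu)
		return false;	/* free objects must be free to overwrite */

	/* one index per object, all of them inside one object */
	return c->num * sizeof(freelist_idx_t) <= c->object_size;
}

int main(void)
{
	struct toy_cache c = { .object_size = 256, .num = 16 };

	printf("objfreelist usable: %d\n", can_use_objfreelist(&c));
	return 0;
}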
@@ -2217,6 +2248,11 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	}
 #endif
 
+	if (set_objfreelist_slab_cache(cachep, size, flags)) {
+		flags |= CFLGS_OBJFREELIST_SLAB;
+		goto done;
+	}
+
 	if (set_off_slab_cache(cachep, size, flags)) {
 		flags |= CFLGS_OFF_SLAB;
 		goto done;
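In __kmem_cache_create() the new mode is tried first, then off-slab, then the traditional on-slab layout, so a cache silently falls back when the object freelist cannot be used. A compact sketch of that decision ladder; the enum and function names are made up for illustration, the real code sets flag bits and jumps to the done label:

#include <stdbool.h>
#include <stdio.h>

enum freelist_mode { FREELIST_IN_OBJECT, FREELIST_OFF_SLAB, FREELIST_ON_SLAB };

static bool fits_in_object(void)  { return true;  }	/* stand-ins for the     */
static bool fits_off_slab(void)   { return false; }	/* set_*_cache() helpers */

static enum freelist_mode pick_freelist_mode(void)
{
	if (fits_in_object())
		return FREELIST_IN_OBJECT;	/* preferred: zero space overhead */
	if (fits_off_slab())
		return FREELIST_OFF_SLAB;	/* separate management object */
	return FREELIST_ON_SLAB;		/* classic embedded freelist */
}

int main(void)
{
	printf("chosen mode: %d\n", pick_freelist_mode());
	return 0;
}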
@@ -2434,7 +2470,9 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
 	page->s_mem = addr + colour_off;
 	page->active = 0;
 
-	if (OFF_SLAB(cachep)) {
+	if (OBJFREELIST_SLAB(cachep))
+		freelist = NULL;
+	else if (OFF_SLAB(cachep)) {
 		/* Slab management obj is off-slab. */
 		freelist = kmem_cache_alloc_node(cachep->freelist_cache,
 						 local_flags, nodeid);
@@ -2507,6 +2545,11 @@ static void cache_init_objs(struct kmem_cache *cachep,
 
 	cache_init_objs_debug(cachep, page);
 
+	if (OBJFREELIST_SLAB(cachep)) {
+		page->freelist = index_to_obj(cachep, page, cachep->num - 1) +
+						obj_offset(cachep);
+	}
+
 	for (i = 0; i < cachep->num; i++) {
 		/* constructor could break poison info */
 		if (DEBUG == 0 && cachep->ctor)
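For a freshly grown slab the index array is parked in the payload of the last object (obj_offset() skips the debug padding when it is present). The indices still cover all cachep->num objects, including the one hosting them, because that object is only handed out once the freelist is no longer needed. A user-space sketch of the address arithmetic; the page layout and names are simplified and hypothetical:

#include <stdio.h>
#include <stdlib.h>

typedef unsigned char freelist_idx_t;

/* Simplified stand-ins for page->s_mem and index_to_obj(); not kernel structures. */
struct toy_slab {
	char *s_mem;		/* first object in the page */
	size_t obj_size;
	unsigned int num;
	freelist_idx_t *freelist;
};

static void *index_to_obj(struct toy_slab *s, unsigned int idx)
{
	return s->s_mem + (size_t)idx * s->obj_size;
}

static void init_objfreelist(struct toy_slab *s)
{
	unsigned int i;

	/* park the index array inside the last (still free) object */
	s->freelist = index_to_obj(s, s->num - 1);
	for (i = 0; i < s->num; i++)
		s->freelist[i] = (freelist_idx_t)i;	/* covers every object */
}

int main(void)
{
	struct toy_slab s = { .obj_size = 128, .num = 32 };

	s.s_mem = malloc(s.obj_size * s.num);
	init_objfreelist(&s);
	printf("freelist lives in object %u, first free index %u\n",
	       s.num - 1, s.freelist[0]);
	free(s.s_mem);
	return 0;
}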
@@ -2558,6 +2601,9 @@ static void slab_put_obj(struct kmem_cache *cachep,
 	}
 #endif
 	page->active--;
+	if (!page->freelist)
+		page->freelist = objp + obj_offset(cachep);
+
 	set_free_obj(page, page->active, objnr);
 }
 
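Once a slab fills up, fixup_slab_list() (later in this patch) sets page->freelist to NULL; the first object freed back into that slab then donates its own memory as the new index array before set_free_obj() records it. A small user-space sketch of that free path; the struct and helper names are illustrative and the obj_offset() adjustment is omitted:

#include <stdio.h>
#include <stdlib.h>

typedef unsigned char freelist_idx_t;

/* Minimal stand-in for the struct page fields used here. */
struct toy_page {
	freelist_idx_t *freelist;	/* NULL while the slab is full */
	unsigned int active;		/* objects currently allocated */
};

/* Free object number 'objnr'; 'objp' is that object's memory. */
static void toy_put_obj(struct toy_page *page, void *objp, unsigned int objnr)
{
	page->active--;

	/* first free into a full slab: the freed object hosts the new freelist */
	if (!page->freelist)
		page->freelist = objp;

	/* record the freed index at the new top of the freelist */
	page->freelist[page->active] = (freelist_idx_t)objnr;
}

int main(void)
{
	struct toy_page page = { .freelist = NULL, .active = 4 };
	void *obj = malloc(128);

	toy_put_obj(&page, obj, 2);
	printf("active=%u, top free index=%u\n", page.active,
	       page.freelist[page.active]);
	free(obj);
	return 0;
}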
@@ -2632,7 +2678,7 @@ static int cache_grow(struct kmem_cache *cachep,
 	/* Get slab management. */
 	freelist = alloc_slabmgmt(cachep, page, offset,
 			local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
-	if (!freelist)
+	if (OFF_SLAB(cachep) && !freelist)
 		goto opps1;
 
 	slab_map_pages(cachep, page, freelist);
@@ -2735,14 +2781,42 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 #define cache_free_debugcheck(x,objp,z) (objp)
 #endif
 
+static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
+						void **list)
+{
+#if DEBUG
+	void *next = *list;
+	void *objp;
+
+	while (next) {
+		objp = next - obj_offset(cachep);
+		next = *(void **)next;
+		poison_obj(cachep, objp, POISON_FREE);
+	}
+#endif
+}
+
 static inline void fixup_slab_list(struct kmem_cache *cachep,
-				struct kmem_cache_node *n, struct page *page)
+				struct kmem_cache_node *n, struct page *page,
+				void **list)
 {
 	/* move slabp to correct slabp list: */
 	list_del(&page->lru);
-	if (page->active == cachep->num)
+	if (page->active == cachep->num) {
 		list_add(&page->lru, &n->slabs_full);
-	else
+		if (OBJFREELIST_SLAB(cachep)) {
+#if DEBUG
+			/* Poisoning will be done without holding the lock */
+			if (cachep->flags & SLAB_POISON) {
+				void **objp = page->freelist;
+
+				*objp = *list;
+				*list = objp;
+			}
+#endif
+			page->freelist = NULL;
+		}
+	} else
 		list_add(&page->lru, &n->slabs_partial);
 }
 
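In debug builds the slab that just went full cannot be poisoned while n->list_lock is held, so fixup_slab_list() pushes the object that held the freelist onto a singly linked list threaded through the objects themselves (*objp = *list; *list = objp), and the callers drain it with fixup_objfreelist_debug() once the lock is dropped. A user-space sketch of that intrusive deferral pattern; the lock itself and the obj_offset() adjustment are elided, and the names are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Push an object onto an intrusive list: the link pointer is stored in the
 * object's own first bytes, so nothing is allocated while a (hypothetical)
 * lock is held. */
static void defer_obj(void **list, void *objp)
{
	*(void **)objp = *list;
	*list = objp;
}

/* Later, without the lock: walk the chain and "poison" each object. */
static void drain_deferred(void *list, size_t obj_size)
{
	while (list) {
		void *next = *(void **)list;

		memset(list, 0x6b, obj_size);	/* POISON_FREE-style pattern */
		list = next;
	}
}

int main(void)
{
	void *list = NULL;
	void *a = malloc(64), *b = malloc(64);

	defer_obj(&list, a);	/* would happen under the node lock */
	defer_obj(&list, b);
	drain_deferred(list, 64);	/* happens after spin_unlock() */

	printf("first byte of a after poisoning: 0x%02x\n",
	       ((unsigned char *)a)[0]);
	free(a);
	free(b);
	return 0;
}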
@@ -2768,6 +2842,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
 	struct kmem_cache_node *n;
 	struct array_cache *ac;
 	int node;
+	void *list = NULL;
 
 	check_irq_off();
 	node = numa_mem_id();
@@ -2819,13 +2894,14 @@ retry:
 			ac_put_obj(cachep, ac, slab_get_obj(cachep, page));
 		}
 
-		fixup_slab_list(cachep, n, page);
+		fixup_slab_list(cachep, n, page, &list);
 	}
 
 must_grow:
 	n->free_objects -= ac->avail;
 alloc_done:
 	spin_unlock(&n->list_lock);
+	fixup_objfreelist_debug(cachep, &list);
 
 	if (unlikely(!ac->avail)) {
 		int x;
@@ -3062,6 +3138,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 	struct page *page;
 	struct kmem_cache_node *n;
 	void *obj;
+	void *list = NULL;
 	int x;
 
 	VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
@@ -3086,9 +3163,10 @@ retry:
 	obj = slab_get_obj(cachep, page);
 	n->free_objects--;
 
-	fixup_slab_list(cachep, n, page);
+	fixup_slab_list(cachep, n, page, &list);
 
 	spin_unlock(&n->list_lock);
+	fixup_objfreelist_debug(cachep, &list);
 	goto done;
 
 must_grow:
|