@@ -1686,17 +1686,9 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	__free_pages(page, order);
 }
 
-#define need_reserve_slab_rcu						\
-	(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
-
 static void rcu_free_slab(struct rcu_head *h)
 {
-	struct page *page;
-
-	if (need_reserve_slab_rcu)
-		page = virt_to_head_page(h);
-	else
-		page = container_of((struct list_head *)h, struct page, lru);
+	struct page *page = container_of(h, struct page, rcu_head);
 
 	__free_slab(page->slab_cache, page);
 }
@@ -1704,19 +1696,7 @@ static void rcu_free_slab(struct rcu_head *h)
 static void free_slab(struct kmem_cache *s, struct page *page)
 {
 	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
-		struct rcu_head *head;
-
-		if (need_reserve_slab_rcu) {
-			int order = compound_order(page);
-			int offset = (PAGE_SIZE << order) - s->reserved;
-
-			VM_BUG_ON(s->reserved != sizeof(*head));
-			head = page_address(page) + offset;
-		} else {
-			head = &page->rcu_head;
-		}
-
-		call_rcu(head, rcu_free_slab);
+		call_rcu(&page->rcu_head, rcu_free_slab);
 	} else
 		__free_slab(s, page);
 }
@@ -3583,9 +3563,6 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
 	s->random = get_random_long();
 #endif
 
-	if (need_reserve_slab_rcu && (s->flags & SLAB_TYPESAFE_BY_RCU))
-		s->reserved = sizeof(struct rcu_head);
-
 	if (!calculate_sizes(s, -1))
 		goto error;
 	if (disable_higher_order_debug) {
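
The `+` lines above use the standard container_of()/rcu_head idiom: once struct page is guaranteed to have room for an rcu_head (the condition the removed need_reserve_slab_rcu macro used to test, falling back to reserved space at the end of the slab when page->lru was too small), the RCU callback can recover the enclosing page with plain pointer arithmetic. Below is a minimal userspace sketch of that idiom; container_of() is hand-rolled, and the names my_slab and free_cb are illustrative, not kernel APIs.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-in for the kernel's container_of(): recover a pointer
 * to the enclosing structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head {		/* stand-in for the kernel's struct rcu_head */
	struct rcu_head *next;
	void (*func)(struct rcu_head *);
};

struct my_slab {		/* illustrative; plays the role of struct page */
	int order;
	struct rcu_head rcu_head;	/* embedded, as in struct page after the rework */
};

/* The callback receives only the rcu_head; container_of() maps it back to
 * the enclosing object, just as the patched rcu_free_slab() does. */
static void free_cb(struct rcu_head *h)
{
	struct my_slab *slab = container_of(h, struct my_slab, rcu_head);

	printf("freeing slab of order %d\n", slab->order);
	free(slab);
}

int main(void)
{
	struct my_slab *slab = malloc(sizeof(*slab));

	slab->order = 3;
	/* In the kernel this would be call_rcu(&slab->rcu_head, free_cb);
	 * here the callback is invoked directly to show the pointer math. */
	free_cb(&slab->rcu_head);
	return 0;
}

With the rcu_head always available in struct page, the virt_to_head_page() fallback and the s->reserved bookkeeping in kmem_cache_open() become dead code, which is why all three hunks delete it rather than relocate it.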