@@ -356,21 +356,6 @@ static __always_inline void slab_unlock(struct page *page)
 	__bit_spin_unlock(PG_locked, &page->flags);
 }
 
-static inline void set_page_slub_counters(struct page *page, unsigned long counters_new)
-{
-	struct page tmp;
-	tmp.counters = counters_new;
-	/*
-	 * page->counters can cover frozen/inuse/objects as well
-	 * as page->_refcount. If we assign to ->counters directly
-	 * we run the risk of losing updates to page->_refcount, so
-	 * be careful and only assign to the fields we need.
-	 */
-	page->frozen = tmp.frozen;
-	page->inuse = tmp.inuse;
-	page->objects = tmp.objects;
-}
-
 /* Interrupts must be disabled (for the fallback code to work right) */
 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 		void *freelist_old, unsigned long counters_old,
@@ -392,7 +377,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
 	if (page->freelist == freelist_old &&
 					page->counters == counters_old) {
 		page->freelist = freelist_new;
-		set_page_slub_counters(page, counters_new);
+		page->counters = counters_new;
 		slab_unlock(page);
 		return true;
 	}
@@ -431,7 +416,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 	if (page->freelist == freelist_old &&
 					page->counters == counters_old) {
 		page->freelist = freelist_new;
-		set_page_slub_counters(page, counters_new);
+		page->counters = counters_new;
 		slab_unlock(page);
 		local_irq_restore(flags);
 		return true;
@@ -1694,7 +1679,6 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	__ClearPageSlabPfmemalloc(page);
 	__ClearPageSlab(page);
 
-	page_mapcount_reset(page);
 	page->mapping = NULL;
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;