@@ -57,6 +57,8 @@
  *
  *	page->private (union with page->first_page): refers to the
  *		component page after the first page
+ *		If the page is first_page for huge object, it stores handle.
+ *		Look at size_class->huge.
  *	page->freelist: points to the first free object in zspage.
  *		Free objects are linked together using in-place
  *		metadata.
@@ -163,7 +165,7 @@
 #define ZS_MIN_ALLOC_SIZE \
	MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
 /* each chunk includes extra space to keep handle */
-#define ZS_MAX_ALLOC_SIZE	(PAGE_SIZE + ZS_HANDLE_SIZE)
+#define ZS_MAX_ALLOC_SIZE	PAGE_SIZE
 
 /*
  * On systems with 4K page size, this gives 255 size classes! There is a
@@ -239,6 +241,8 @@ struct size_class {
 
	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
	int pages_per_zspage;
+	/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
+	bool huge;
 
 #ifdef CONFIG_ZSMALLOC_STAT
	struct zs_size_stat stats;
@@ -300,6 +304,7 @@ struct mapping_area {
 #endif
	char *vm_addr; /* address of kmap_atomic()'ed pages */
	enum zs_mapmode vm_mm; /* mapping mode */
+	bool huge;
 };
 
 static int create_handle_cache(struct zs_pool *pool)
@@ -457,7 +462,7 @@ static int get_size_class_index(int size)
		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
				ZS_SIZE_CLASS_DELTA);
 
-	return idx;
+	return min(zs_size_classes - 1, idx);
 }
 
 /*
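
Why the clamp matters: zs_malloc() below now passes size + ZS_HANDLE_SIZE to this lookup while ZS_MAX_ALLOC_SIZE no longer reserves that headroom, so the largest requests would index one slot past the class array. A standalone sketch of the arithmetic, with constants that assume 4K pages and an 8-byte handle (ZS_HANDLE_SIZE == sizeof(unsigned long) on 64-bit):

#include <stdio.h>

#define PAGE_SIZE		4096
#define ZS_HANDLE_SIZE		8
#define ZS_MIN_ALLOC_SIZE	32
#define ZS_SIZE_CLASS_DELTA	16	/* PAGE_SIZE >> 8 */
/* (ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / DELTA + 1 == 255 classes */
#define ZS_SIZE_CLASSES		255

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define MIN(a, b)		((a) < (b) ? (a) : (b))

int main(void)
{
	/* zs_malloc() looks up size + ZS_HANDLE_SIZE, which can exceed
	 * PAGE_SIZE for the largest allowed allocation. */
	int size = PAGE_SIZE + ZS_HANDLE_SIZE;	/* 4104 */
	int idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
			       ZS_SIZE_CLASS_DELTA);

	printf("raw idx = %d (valid: 0..%d)\n", idx, ZS_SIZE_CLASSES - 1);
	printf("clamped = %d\n", MIN(ZS_SIZE_CLASSES - 1, idx));
	return 0;
}

The unclamped index comes out as 255 here, one past the last class; the clamp sends it to the largest class, whose huge path in zs_malloc() then drops the handle headroom again.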
@@ -666,9 +671,14 @@ static unsigned long handle_to_obj(unsigned long handle)
	return *(unsigned long *)handle;
 }
 
-unsigned long obj_to_head(void *obj)
+static unsigned long obj_to_head(struct size_class *class, struct page *page,
+			void *obj)
 {
-	return *(unsigned long *)obj;
+	if (class->huge) {
+		VM_BUG_ON(!is_first_page(page));
+		return page_private(page);
+	} else
+		return *(unsigned long *)obj;
 }
 
 static unsigned long obj_idx_to_offset(struct page *page,
@@ -954,9 +964,12 @@ static void __zs_unmap_object(struct mapping_area *area,
	if (area->vm_mm == ZS_MM_RO)
		goto out;
 
-	buf = area->vm_buf + ZS_HANDLE_SIZE;
-	size -= ZS_HANDLE_SIZE;
-	off += ZS_HANDLE_SIZE;
+	buf = area->vm_buf;
+	if (!area->huge) {
+		buf = buf + ZS_HANDLE_SIZE;
+		size -= ZS_HANDLE_SIZE;
+		off += ZS_HANDLE_SIZE;
+	}
 
	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];
@@ -1295,7 +1308,10 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
 
	ret = __zs_map_object(area, pages, off, class->size);
 out:
-	return ret + ZS_HANDLE_SIZE;
+	if (!class->huge)
+		ret += ZS_HANDLE_SIZE;
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(zs_map_object);
 
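
This hunk and the __zs_unmap_object() change above encode a single layout rule: a normal class prepends the handle to each chunk, while a huge class keeps the whole chunk for payload and parks the handle in first_page->private. A minimal sketch of that rule; illustrative only, assuming ZS_HANDLE_SIZE == sizeof(unsigned long) as defined earlier in this series:

/*
 * Chunk layout as seen by zs_map_object()/__zs_unmap_object():
 *
 *  normal class:  | handle | payload ...............|
 *                 ^ chunk   ^ pointer handed to user
 *
 *  huge class:    | payload ........................|
 *                 ^ chunk == pointer handed to user
 *                 (handle lives in first_page->private)
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define ZS_HANDLE_SIZE	sizeof(unsigned long)

/* Offset of the user payload within a mapped chunk. */
static size_t payload_offset(bool huge)
{
	return huge ? 0 : ZS_HANDLE_SIZE;
}

int main(void)
{
	printf("normal: payload at +%zu\n", payload_offset(false));
	printf("huge:   payload at +%zu\n", payload_offset(true));
	return 0;
}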
@@ -1352,8 +1368,12 @@ static unsigned long obj_malloc(struct page *first_page,
	vaddr = kmap_atomic(m_page);
	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
	first_page->freelist = link->next;
-	/* record handle in the header of allocated chunk */
-	link->handle = handle;
+	if (!class->huge)
+		/* record handle in the header of allocated chunk */
+		link->handle = handle;
+	else
+		/* record handle in first_page->private */
+		set_page_private(first_page, handle);
	kunmap_atomic(vaddr);
	first_page->inuse++;
	zs_stat_inc(class, OBJ_USED, 1);
@@ -1377,7 +1397,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
	struct size_class *class;
	struct page *first_page;
 
-	if (unlikely(!size || (size + ZS_HANDLE_SIZE) > ZS_MAX_ALLOC_SIZE))
+	if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
		return 0;
 
	handle = alloc_handle(pool);
@@ -1387,6 +1407,11 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
	/* extra space in chunk to keep the handle */
	size += ZS_HANDLE_SIZE;
	class = pool->size_class[get_size_class_index(size)];
+	/* In huge class size, we store the handle into first_page->private */
+	if (class->huge) {
+		size -= ZS_HANDLE_SIZE;
+		class = pool->size_class[get_size_class_index(size)];
+	}
 
	spin_lock(&class->lock);
	first_page = find_get_zspage(class);
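
A worked example of the size dance above, with illustrative constants (4K pages, 8-byte handle) and the class lookup reduced to its index arithmetic: a 4000-byte request is first bumped to 4008 bytes, which selects a class whose chunks hold a single object per page; that class is huge, so the 8 bytes of header room are handed back and the class is chosen again for the bare 4000 bytes.

#include <stdio.h>

#define PAGE_SIZE		4096
#define ZS_HANDLE_SIZE		8
#define ZS_MIN_ALLOC_SIZE	32
#define ZS_SIZE_CLASS_DELTA	16
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int size = 4000;	/* caller's request */

	size += ZS_HANDLE_SIZE;	/* 4008: reserve room for the header */
	printf("first lookup:  size=%d idx=%d\n", size,
	       DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE, ZS_SIZE_CLASS_DELTA));

	/* The class picked for 4008 bytes (4016-byte chunks) holds one
	 * object per page, so it is huge and the handle moves to
	 * first_page->private: give the 8 bytes back and pick again. */
	size -= ZS_HANDLE_SIZE;	/* 4000 */
	printf("second lookup: size=%d idx=%d\n", size,
	       DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE, ZS_SIZE_CLASS_DELTA));
	return 0;
}

The two lookups land in different classes (index 249 vs 248 here), so dropping the header room genuinely reduces per-object waste rather than just relabeling the class.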
@@ -1442,6 +1467,8 @@ static void obj_free(struct zs_pool *pool, struct size_class *class,
	/* Insert this object in containing zspage's freelist */
	link = (struct link_free *)(vaddr + f_offset);
	link->next = first_page->freelist;
+	if (class->huge)
+		set_page_private(first_page, 0);
	kunmap_atomic(vaddr);
	first_page->freelist = (void *)obj;
	first_page->inuse--;
@@ -1567,7 +1594,7 @@ static unsigned long find_alloced_obj(struct page *page, int index,
	offset += class->size * index;
 
	while (offset < PAGE_SIZE) {
-		head = obj_to_head(addr + offset);
+		head = obj_to_head(class, page, addr + offset);
		if (head & OBJ_ALLOCATED_TAG) {
			handle = head & ~OBJ_ALLOCATED_TAG;
			if (trypin_tag(handle))
@@ -1837,6 +1864,9 @@ struct zs_pool *zs_create_pool(char *name, gfp_t flags)
		class->size = size;
		class->index = i;
		class->pages_per_zspage = pages_per_zspage;
+		if (pages_per_zspage == 1 &&
+			get_maxobj_per_zspage(size, pages_per_zspage) == 1)
+			class->huge = true;
		spin_lock_init(&class->lock);
		pool->size_class[i] = class;
 
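
The predicate above marks a class huge exactly when the best zspage for it is a single page holding a single object. A standalone sketch that re-states the two zsmalloc helpers (assuming 4K pages and ZS_MAX_PAGES_PER_ZSPAGE == 4) and scans the class sizes to find where the huge classes begin; on these assumptions it prints 3280:

#include <stdio.h>

#define PAGE_SIZE		4096
#define ZS_MAX_PAGES_PER_ZSPAGE	4
#define ZS_MIN_ALLOC_SIZE	32
#define ZS_SIZE_CLASS_DELTA	16

static int get_maxobj_per_zspage(int size, int pages_per_zspage)
{
	return pages_per_zspage * PAGE_SIZE / size;
}

/* Pick the zspage order with the highest used-space percentage,
 * as zsmalloc does at pool creation. */
static int get_pages_per_zspage(int class_size)
{
	int i, max_usedpc = 0;
	int max_usedpc_order = 1;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int zspage_size = i * PAGE_SIZE;
		int waste = zspage_size % class_size;
		int usedpc = (zspage_size - waste) * 100 / zspage_size;

		if (usedpc > max_usedpc) {
			max_usedpc = usedpc;
			max_usedpc_order = i;
		}
	}
	return max_usedpc_order;
}

int main(void)
{
	int size;

	for (size = ZS_MIN_ALLOC_SIZE; size <= PAGE_SIZE;
				size += ZS_SIZE_CLASS_DELTA) {
		int ppz = get_pages_per_zspage(size);

		if (ppz == 1 && get_maxobj_per_zspage(size, ppz) == 1) {
			printf("first huge class size: %d\n", size);
			break;	/* every larger class is huge too */
		}
	}
	return 0;
}

Below that cut-off, spanning several pages per zspage still packs extra objects in, so those classes keep the in-chunk handle header; at and above it, no multi-page zspage does better than one object in one page, which is precisely when the in-chunk header would be pure overhead.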