@@ -290,6 +290,16 @@ void *__kmalloc(size_t size, gfp_t flags);
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
 void kmem_cache_free(struct kmem_cache *, void *);
 
+/*
+ * Bulk allocation and freeing operations. These are accelerated in an
+ * allocator specific way to avoid taking locks repeatedly or building
+ * metadata structures unnecessarily.
+ *
+ * Note that interrupts must be enabled when calling these functions.
+ */
+void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
+bool kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+
 #ifdef CONFIG_NUMA
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);