@@ -157,6 +157,24 @@ size_t ksize(const void *);
 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
 #endif
 
+/*
+ * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
+ * Intended for arches that get misalignment faults even for 64 bit integer
+ * aligned buffers.
+ */
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+#endif
+
+/*
+ * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
+ * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
+ * aligned pointers.
+ */
+#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
+#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
+#define __assume_page_alignment __assume_aligned(PAGE_SIZE)
+
 /*
  * Kmalloc array related definitions
  */
@@ -286,8 +304,8 @@ static __always_inline int kmalloc_index(size_t size)
 }
 #endif /* !CONFIG_SLOB */
 
-void *__kmalloc(size_t size, gfp_t flags);
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
+void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment;
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment;
 void kmem_cache_free(struct kmem_cache *, void *);
 
 /*
@@ -301,8 +319,8 @@ void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
 bool kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
 
 #ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment;
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment;
 #else
 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
@@ -316,12 +334,12 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f
 #endif
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment;
 
 #ifdef CONFIG_NUMA
 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					   gfp_t gfpflags,
-					   int node, size_t size);
+					   int node, size_t size) __assume_slab_alignment;
 #else
 static __always_inline void *
 kmem_cache_alloc_node_trace(struct kmem_cache *s,
@@ -354,10 +372,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
 }
 #endif /* CONFIG_TRACING */
 
-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order);
+extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
 
 #ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
 #else
 static __always_inline void *
 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
@@ -482,15 +500,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 	return __kmalloc_node(size, flags, node);
 }
 
-/*
- * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
- * Intended for arches that get misalignment faults even for 64 bit integer
- * aligned buffers.
- */
-#ifndef ARCH_SLAB_MINALIGN
-#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
-#endif
-
 struct memcg_cache_array {
	struct rcu_head rcu;
	struct kmem_cache *entries[0];
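
The hunks above use __assume_aligned() but do not define it; that helper is expected to come from the compiler headers rather than from this patch. A minimal sketch of the kind of definition assumed, built on the GCC/Clang __assume_aligned__ function attribute (available from gcc 4.9) with a no-op fallback for older compilers; the version check and placement here are illustrative, not necessarily the in-tree definition:

/* Sketch only: map __assume_aligned() onto the compiler attribute. */
#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9))
#define __assume_aligned(a, ...) \
	__attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
#else
#define __assume_aligned(a, ...)	/* older compilers: no effect */
#endif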
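
For context, a small user-space demonstration of what the attribute buys: once a function's return value is declared assume_aligned, the compiler may treat the pointer as aligned at every call site (for example, skipping the realignment prologue when vectorizing), which is the guarantee __assume_kmalloc_alignment now attaches to __kmalloc() and friends. The demo below is hypothetical and not kernel code:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Promise the optimizer an 8-byte aligned result, the way
 * __assume_kmalloc_alignment does for the kmalloc entry points. */
static void *alloc8(size_t n) __attribute__((assume_aligned(8)));

static void *alloc8(size_t n)
{
	return malloc(n);	/* malloc is suitably aligned on common ABIs */
}

int main(void)
{
	size_t n = 16;
	unsigned long long *p = alloc8(n * sizeof(*p));

	if (!p)
		return 1;
	/* The compiler may assume p is 8-byte aligned here. */
	memset(p, 0, n * sizeof(*p));
	printf("8-byte aligned: %d\n", (int)((uintptr_t)p % 8 == 0));
	free(p);
	return 0;
}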