@@ -65,11 +65,34 @@ static noinline void __init kmalloc_node_oob_right(void)
 	kfree(ptr);
 }
 
-static noinline void __init kmalloc_large_oob_right(void)
+#ifdef CONFIG_SLUB
+static noinline void __init kmalloc_pagealloc_oob_right(void)
 {
 	char *ptr;
 	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
 
+	/* Allocate a chunk that does not fit into a SLUB cache to trigger
+	 * the page allocator fallback.
+	 */
+	pr_info("kmalloc pagealloc allocation: out-of-bounds to right\n");
+	ptr = kmalloc(size, GFP_KERNEL);
+	if (!ptr) {
+		pr_err("Allocation failed\n");
+		return;
+	}
+
+	ptr[size] = 0;
+	kfree(ptr);
+}
+#endif
+
+static noinline void __init kmalloc_large_oob_right(void)
+{
+	char *ptr;
+	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;
+	/* Allocate a chunk that is large enough, but still fits into a slab
+	 * and does not trigger the page allocator fallback in SLUB.
+	 */
 	pr_info("kmalloc large allocation: out-of-bounds to right\n");
 	ptr = kmalloc(size, GFP_KERNEL);
 	if (!ptr) {
@@ -324,6 +347,9 @@ static int __init kmalloc_tests_init(void)
 	kmalloc_oob_right();
 	kmalloc_oob_left();
 	kmalloc_node_oob_right();
+#ifdef CONFIG_SLUB
+	kmalloc_pagealloc_oob_right();
+#endif
 	kmalloc_large_oob_right();
 	kmalloc_oob_krealloc_more();
 	kmalloc_oob_krealloc_less();
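
The two size constants in the patch sit on either side of the SLUB fallback boundary: a request larger than KMALLOC_MAX_CACHE_SIZE bypasses the slab caches and is served directly by the page allocator, so KASAN has to catch out-of-bounds accesses on both paths. Below is a minimal userspace sketch of that routing decision, not kernel code: alloc_path() is a hypothetical helper, and the 8 KiB value is an assumption for illustration (the real KMALLOC_MAX_CACHE_SIZE is derived from KMALLOC_SHIFT_HIGH and varies by configuration).

#include <stdio.h>
#include <stddef.h>

/* Assumed value for illustration only; the kernel derives the real
 * constant from KMALLOC_SHIFT_HIGH. */
#define KMALLOC_MAX_CACHE_SIZE ((size_t)8192)

/* Sketch of the routing that the two tests probe from either side. */
static const char *alloc_path(size_t size)
{
	return size > KMALLOC_MAX_CACHE_SIZE ? "page allocator fallback"
					     : "slab cache";
}

int main(void)
{
	/* kmalloc_pagealloc_oob_right(): just past the largest cache. */
	printf("%zu bytes -> %s\n", KMALLOC_MAX_CACHE_SIZE + 10,
	       alloc_path(KMALLOC_MAX_CACHE_SIZE + 10));
	/* kmalloc_large_oob_right(): large, but still slab-backed. */
	printf("%zu bytes -> %s\n", KMALLOC_MAX_CACHE_SIZE - 256,
	       alloc_path(KMALLOC_MAX_CACHE_SIZE - 256));
	return 0;
}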