@@ -294,6 +294,32 @@ EXPORT_SYMBOL(nr_online_nodes);
int page_group_by_mobility_disabled __read_mostly;
 
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+/*
+ * During boot we initialize deferred pages on-demand, as needed, but once
+ * page_alloc_init_late() has finished, the deferred pages are all initialized,
+ * and we can permanently disable that path.
+ */
+static DEFINE_STATIC_KEY_TRUE(deferred_pages);
+
+/*
+ * Call kasan_free_pages() only after deferred memory initialization
+ * has completed. Poisoning pages during deferred memory init will greatly
+ * lengthen the process and cause problems on large-memory systems, as
+ * deferred page initialization is done with interrupts disabled.
+ *
+ * Assuming that there will be no reference to those newly initialized
+ * pages before they are ever allocated, this should have no effect on
+ * KASAN memory tracking as the poison will be properly inserted at page
+ * allocation time. The only corner case is when pages are allocated
+ * on demand and then freed again before the deferred pages
+ * initialization is done, but this is unlikely to happen.
+ */
+static inline void kasan_free_nondeferred_pages(struct page *page, int order)
+{
+ if (!static_branch_unlikely(&deferred_pages))
+ kasan_free_pages(page, order);
+}
+
/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
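
For context, and not part of the patch itself: the wrapper above only skips poisoning while the deferred_pages static key (which this patch merely moves earlier in the file) is still enabled. Once all deferred struct pages have been initialized, page_alloc_init_late() clears the key and kasan_free_nondeferred_pages() degenerates into a plain kasan_free_pages() call. A minimal sketch of that flip, paraphrased from the surrounding mm/page_alloc.c of this era rather than quoted, with the thread start-up and wait details elided:

void __init page_alloc_init_late(void)
{
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/* ... spawn deferred_init_memmap() threads and wait for completion ... */

	/*
	 * All deferred struct pages are now initialized, so the on-demand
	 * path can be disabled for good; from here on the new wrapper
	 * calls kasan_free_pages() unconditionally.
	 */
	static_branch_disable(&deferred_pages);
#endif
	/* ... remaining late initialization ... */
}
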
@@ -326,6 +352,8 @@ static inline bool update_defer_init(pg_data_t *pgdat,
return true;
}
#else
+#define kasan_free_nondeferred_pages(p, o) kasan_free_pages(p, o)
+
static inline bool early_page_uninitialised(unsigned long pfn)
{
return false;
@@ -1030,7 +1058,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
arch_free_page(page, order);
kernel_poison_pages(page, 1 << order, 0);
kernel_map_pages(page, 1 << order, 0);
- kasan_free_pages(page, order);
+ kasan_free_nondeferred_pages(page, order);
 
return true;
}
@@ -1593,13 +1621,6 @@ static int __init deferred_init_memmap(void *data)
return 0;
}
 
-/*
- * During boot we initialize deferred pages on-demand, as needed, but once
- * page_alloc_init_late() has finished, the deferred pages are all initialized,
- * and we can permanently disable that path.
- */
-static DEFINE_STATIC_KEY_TRUE(deferred_pages);
-
/*
* If this zone has deferred pages, try to grow it by initializing enough
* deferred pages to satisfy the allocation specified by order, rounded up to
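
A footnote on the mechanism, illustrative only and using made-up names rather than kernel symbols: DEFINE_STATIC_KEY_TRUE / static_branch_unlikely() / static_branch_disable() is the kernel's jump-label API, so after the one-time disable the check in the page-free hot path is a patched branch rather than a load-and-test. A condensed, self-contained sketch of the same lifecycle the patch relies on:

#include <linux/init.h>
#include <linux/jump_label.h>

/* boot_only starts out enabled, mirroring deferred_pages above. */
static DEFINE_STATIC_KEY_TRUE(boot_only);

/* Stand-in for kasan_free_pages(); hypothetical, for illustration only. */
static void expensive_debug_hook(void)
{
}

static void hot_path(void)
{
	/*
	 * While the key is enabled the hook is skipped; once the key is
	 * disabled below, the branch is patched so the hook runs on
	 * every call with no conditional left in the hot path.
	 */
	if (!static_branch_unlikely(&boot_only))
		expensive_debug_hook();
}

static void __init late_setup(void)
{
	/* One-way flip, analogous to page_alloc_init_late(). */
	static_branch_disable(&boot_only);
}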