@@ -1724,6 +1724,19 @@ static bool check_new_pages(struct page *page, unsigned int order)
 	return false;
 }
 
+inline void post_alloc_hook(struct page *page, unsigned int order,
+				gfp_t gfp_flags)
+{
+	set_page_private(page, 0);
+	set_page_refcounted(page);
+
+	arch_alloc_page(page, order);
+	kernel_map_pages(page, 1 << order, 1);
+	kernel_poison_pages(page, 1 << order, 1);
+	kasan_alloc_pages(page, order);
+	set_page_owner(page, order, gfp_flags);
+}
+
 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
 							unsigned int alloc_flags)
 {
@@ -1736,13 +1749,7 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
 		poisoned &= page_is_poisoned(p);
 	}
 
-	set_page_private(page, 0);
-	set_page_refcounted(page);
-
-	arch_alloc_page(page, order);
-	kernel_map_pages(page, 1 << order, 1);
-	kernel_poison_pages(page, 1 << order, 1);
-	kasan_alloc_pages(page, order);
+	post_alloc_hook(page, order, gfp_flags);
 
 	if (!free_pages_prezeroed(poisoned) && (gfp_flags & __GFP_ZERO))
 		for (i = 0; i < (1 << order); i++)
@@ -1751,8 +1758,6 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
 	if (order && (gfp_flags & __GFP_COMP))
 		prep_compound_page(page, order);
 
-	set_page_owner(page, order, gfp_flags);
-
 	/*
 	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
 	 * allocate the page. The expectation is that the caller is taking