@@ -30,6 +30,7 @@ DEFINE_STATIC_KEY_FALSE(page_owner_inited);

 static depot_stack_handle_t dummy_handle;
 static depot_stack_handle_t failure_handle;
+static depot_stack_handle_t early_handle;

 static void init_early_allocated_pages(void);

@@ -53,7 +54,7 @@ static bool need_page_owner(void)
 	return true;
 }

-static noinline void register_dummy_stack(void)
+static __always_inline depot_stack_handle_t create_dummy_stack(void)
 {
 	unsigned long entries[4];
 	struct stack_trace dummy;
@@ -64,21 +65,22 @@ static noinline void register_dummy_stack(void)
 	dummy.skip = 0;

 	save_stack_trace(&dummy);
-	dummy_handle = depot_save_stack(&dummy, GFP_KERNEL);
+	return depot_save_stack(&dummy, GFP_KERNEL);
 }

-static noinline void register_failure_stack(void)
+static noinline void register_dummy_stack(void)
 {
-	unsigned long entries[4];
-	struct stack_trace failure;
+	dummy_handle = create_dummy_stack();
+}

-	failure.nr_entries = 0;
-	failure.max_entries = ARRAY_SIZE(entries);
-	failure.entries = &entries[0];
-	failure.skip = 0;
+static noinline void register_failure_stack(void)
+{
+	failure_handle = create_dummy_stack();
+}

-	save_stack_trace(&failure);
-	failure_handle = depot_save_stack(&failure, GFP_KERNEL);
+static noinline void register_early_stack(void)
+{
+	early_handle = create_dummy_stack();
 }

 static void init_page_owner(void)
@@ -88,6 +90,7 @@ static void init_page_owner(void)

 	register_dummy_stack();
 	register_failure_stack();
+	register_early_stack();
 	static_branch_enable(&page_owner_inited);
 	init_early_allocated_pages();
 }
@@ -165,17 +168,13 @@ static noinline depot_stack_handle_t save_stack(gfp_t flags)
 	return handle;
 }

-noinline void __set_page_owner(struct page *page, unsigned int order,
-					gfp_t gfp_mask)
+static inline void __set_page_owner_handle(struct page_ext *page_ext,
+	depot_stack_handle_t handle, unsigned int order, gfp_t gfp_mask)
 {
-	struct page_ext *page_ext = lookup_page_ext(page);
 	struct page_owner *page_owner;

-	if (unlikely(!page_ext))
-		return;
-
 	page_owner = get_page_owner(page_ext);
-	page_owner->handle = save_stack(gfp_mask);
+	page_owner->handle = handle;
 	page_owner->order = order;
 	page_owner->gfp_mask = gfp_mask;
 	page_owner->last_migrate_reason = -1;
@@ -183,6 +182,19 @@ noinline void __set_page_owner(struct page *page, unsigned int order,
 	__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
 }

+noinline void __set_page_owner(struct page *page, unsigned int order,
+					gfp_t gfp_mask)
+{
+	struct page_ext *page_ext = lookup_page_ext(page);
+	depot_stack_handle_t handle;
+
+	if (unlikely(!page_ext))
+		return;
+
+	handle = save_stack(gfp_mask);
+	__set_page_owner_handle(page_ext, handle, order, gfp_mask);
+}
+
 void __set_page_owner_migrate_reason(struct page *page, int reason)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
@@ -565,12 +577,12 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
 			if (unlikely(!page_ext))
 				continue;

-			/* Maybe overraping zone */
+			/* Maybe overlapping zone */
 			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
 				continue;

 			/* Found early allocated page */
-			set_page_owner(page, 0, 0);
+			__set_page_owner_handle(page_ext, early_handle, 0, 0);
 			count++;
 		}
 	}