@@ -2410,12 +2410,11 @@ static void __split_huge_page_tail(struct page *head, int tail,
 }
 
 static void __split_huge_page(struct page *page, struct list_head *list,
-		unsigned long flags)
+		pgoff_t end, unsigned long flags)
 {
 	struct page *head = compound_head(page);
 	struct zone *zone = page_zone(head);
 	struct lruvec *lruvec;
-	pgoff_t end = -1;
 	int i;
 
 	lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat);
@@ -2423,9 +2422,6 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 	/* complete memcg works before add pages to LRU */
 	mem_cgroup_split_huge_fixup(head);
 
-	if (!PageAnon(page))
-		end = DIV_ROUND_UP(i_size_read(head->mapping->host), PAGE_SIZE);
-
 	for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
 		__split_huge_page_tail(head, i, lruvec, list);
 		/* Some pages can be beyond i_size: drop them from page cache */
@@ -2597,6 +2593,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 	int count, mapcount, extra_pins, ret;
 	bool mlocked;
 	unsigned long flags;
+	pgoff_t end;
 
 	VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
@@ -2619,6 +2616,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 			ret = -EBUSY;
 			goto out;
 		}
+		end = -1;
 		mapping = NULL;
 		anon_vma_lock_write(anon_vma);
 	} else {
@@ -2632,6 +2630,15 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 
 		anon_vma = NULL;
 		i_mmap_lock_read(mapping);
+
+		/*
+		 *__split_huge_page() may need to trim off pages beyond EOF:
+		 * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
+		 * which cannot be nested inside the page tree lock. So note
+		 * end now: i_size itself may be changed at any moment, but
+		 * head page lock is good enough to serialize the trimming.
+		 */
+		end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
 	}
 
 	/*
@@ -2681,7 +2688,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 		if (mapping)
 			__dec_node_page_state(page, NR_SHMEM_THPS);
 		spin_unlock(&pgdata->split_queue_lock);
-		__split_huge_page(page, list, flags);
+		__split_huge_page(page, list, end, flags);
 		if (PageSwapCache(head)) {
 			swp_entry_t entry = { .val = page_private(head) };
 
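
For illustration only (not part of the patch): a small user-space sketch of what the hoisted end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE) computes, namely the first page index beyond EOF, so tail pages with index >= end are the ones __split_huge_page() drops from the page cache. The DIV_ROUND_UP/PAGE_SIZE definitions and the sample i_size values below are stand-ins, not kernel code.

#include <stdio.h>

/* User-space stand-ins for the kernel macros, only to show the rounding. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define PAGE_SIZE		4096UL	/* assumes 4K pages */

int main(void)
{
	/* Hypothetical i_size values: empty file, one byte, exactly one
	 * page, one byte into the second page, exactly two pages. */
	unsigned long i_size[] = { 0, 1, 4096, 4097, 8192 };
	int i;

	for (i = 0; i < 5; i++)
		printf("i_size=%5lu -> end=%lu (first page index beyond EOF)\n",
		       i_size[i], DIV_ROUND_UP(i_size[i], PAGE_SIZE));
	return 0;
}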