@@ -2350,7 +2350,7 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
 	}
 }
 
-static void freeze_page(struct page *page)
+static void unmap_page(struct page *page)
 {
 	enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
 		TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
@@ -2365,7 +2365,7 @@ static void freeze_page(struct page *page)
 	VM_BUG_ON_PAGE(!unmap_success, page);
 }
 
-static void unfreeze_page(struct page *page)
+static void remap_page(struct page *page)
 {
 	int i;
 	if (PageTransHuge(page)) {
@@ -2483,7 +2483,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 
 	spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
 
-	unfreeze_page(head);
+	remap_page(head);
 
 	for (i = 0; i < HPAGE_PMD_NR; i++) {
 		struct page *subpage = head + i;
@@ -2664,7 +2664,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 	}
 
 	/*
-	 * Racy check if we can split the page, before freeze_page() will
+	 * Racy check if we can split the page, before unmap_page() will
 	 * split PMDs
 	 */
 	if (!can_split_huge_page(head, &extra_pins)) {
@@ -2673,7 +2673,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 	}
 
 	mlocked = PageMlocked(page);
-	freeze_page(head);
+	unmap_page(head);
 	VM_BUG_ON_PAGE(compound_mapcount(head), head);
 
 	/* Make sure the page is not on per-CPU pagevec as it takes pin */
@@ -2727,7 +2727,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 fail:		if (mapping)
 			xa_unlock(&mapping->i_pages);
 		spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
-		unfreeze_page(head);
+		remap_page(head);
 		ret = -EBUSY;
 	}
 