@@ -928,6 +928,23 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 
 	ret = -EAGAIN;
 	pmd = *src_pmd;
+
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+	if (unlikely(is_swap_pmd(pmd))) {
+		swp_entry_t entry = pmd_to_swp_entry(pmd);
+
+		VM_BUG_ON(!is_pmd_migration_entry(pmd));
+		if (is_write_migration_entry(entry)) {
+			make_migration_entry_read(&entry);
+			pmd = swp_entry_to_pmd(entry);
+			set_pmd_at(src_mm, addr, src_pmd, pmd);
+		}
+		set_pmd_at(dst_mm, addr, dst_pmd, pmd);
+		ret = 0;
+		goto out_unlock;
+	}
+#endif
+
 	if (unlikely(!pmd_trans_huge(pmd))) {
		pte_free(dst_mm, pgtable);
 		goto out_unlock;
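
The noteworthy detail in the copy_huge_pmd() hunk above: on fork, a write migration entry is downgraded to a read one before being installed in both mm's, so the pfn is kept but write permission is dropped until the migration fault path can sort things out. Below is a toy userspace model of that downgrade; the bit layout and SWP_MIGRATION_* values are assumptions for illustration only, not the real (arch-dependent) encoding from include/linux/swapops.h.

/*
 * Toy model of the write->read migration-entry downgrade done in
 * copy_huge_pmd() above.  Bit layout and type values are invented.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define SWP_OFFSET_BITS     50ULL
#define SWP_MIGRATION_READ  30
#define SWP_MIGRATION_WRITE 31

typedef struct { unsigned long long val; } swp_entry_t;

static swp_entry_t swp_entry(int type, unsigned long long offset)
{
	swp_entry_t e = { ((unsigned long long)type << SWP_OFFSET_BITS) | offset };
	return e;
}

static int swp_type(swp_entry_t e)
{
	return e.val >> SWP_OFFSET_BITS;
}

static unsigned long long swp_offset(swp_entry_t e)
{
	return e.val & ((1ULL << SWP_OFFSET_BITS) - 1);
}

static bool is_write_migration_entry(swp_entry_t e)
{
	return swp_type(e) == SWP_MIGRATION_WRITE;
}

/* Keep the pfn in the offset field, drop only the write permission. */
static void make_migration_entry_read(swp_entry_t *e)
{
	*e = swp_entry(SWP_MIGRATION_READ, swp_offset(*e));
}

int main(void)
{
	swp_entry_t entry = swp_entry(SWP_MIGRATION_WRITE, 0x1234);

	if (is_write_migration_entry(entry))
		make_migration_entry_read(&entry);

	assert(swp_type(entry) == SWP_MIGRATION_READ);
	assert(swp_offset(entry) == 0x1234);	/* pfn survives the downgrade */
	printf("entry downgraded, offset 0x%llx preserved\n", swp_offset(entry));
	return 0;
}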
@@ -1599,6 +1616,12 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	if (is_huge_zero_pmd(orig_pmd))
 		goto out;
 
+	if (unlikely(!pmd_present(orig_pmd))) {
+		VM_BUG_ON(thp_migration_supported() &&
+			  !is_pmd_migration_entry(orig_pmd));
+		goto out;
+	}
+
 	page = pmd_page(orig_pmd);
 	/*
 	 * If other processes are mapping this page, we couldn't discard
@@ -1810,6 +1833,25 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	preserve_write = prot_numa && pmd_write(*pmd);
 	ret = 1;
 
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+	if (is_swap_pmd(*pmd)) {
+		swp_entry_t entry = pmd_to_swp_entry(*pmd);
+
+		VM_BUG_ON(!is_pmd_migration_entry(*pmd));
+		if (is_write_migration_entry(entry)) {
+			pmd_t newpmd;
+			/*
+			 * A protection check is difficult so
+			 * just be safe and disable write
+			 */
+			make_migration_entry_read(&entry);
+			newpmd = swp_entry_to_pmd(entry);
+			set_pmd_at(mm, addr, pmd, newpmd);
+		}
+		goto unlock;
+	}
+#endif
+
 	/*
 	 * Avoid trapping faults against the zero page. The read-only
 	 * data is likely to be read-cached on the local CPU and
@@ -1875,7 +1917,8 @@ spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
 {
 	spinlock_t *ptl;
 	ptl = pmd_lock(vma->vm_mm, pmd);
-	if (likely(pmd_trans_huge(*pmd) || pmd_devmap(*pmd)))
+	if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
+			pmd_devmap(*pmd)))
 		return ptl;
 	spin_unlock(ptl);
 	return NULL;
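
With this change, callers of __pmd_trans_huge_lock() also get the ptl back for a pmd-level swap entry, which is what lets the hunks above handle migration entries under the lock. The is_swap_pmd() predicate is introduced elsewhere in this series; as far as I recall it is simply "not none and not present", but treat the exact definition here as an assumption. A toy model with an invented pmd_t and present bit:

/*
 * Toy model of the is_swap_pmd() predicate relied on above.
 * pmd_t, the present bit and the helpers are stand-ins.
 */
#include <assert.h>
#include <stdbool.h>

typedef struct { unsigned long val; } pmd_t;

#define PMD_PRESENT_BIT 0x1UL	/* hypothetical hardware present bit */

static bool pmd_none(pmd_t pmd)    { return pmd.val == 0; }
static bool pmd_present(pmd_t pmd) { return pmd.val & PMD_PRESENT_BIT; }

/* Non-empty but not present: a swap or migration entry is stored here. */
static bool is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

int main(void)
{
	pmd_t none    = { 0 };
	pmd_t mapped  = { 0xabc0 | PMD_PRESENT_BIT };
	pmd_t migrate = { 0xdef0 };	/* entry bits set, present bit clear */

	assert(!is_swap_pmd(none));
	assert(!is_swap_pmd(mapped));
	assert(is_swap_pmd(migrate));
	return 0;
}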
@@ -1993,14 +2036,15 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 	struct page *page;
 	pgtable_t pgtable;
 	pmd_t _pmd;
-	bool young, write, dirty, soft_dirty;
+	bool young, write, dirty, soft_dirty, pmd_migration = false;
 	unsigned long addr;
 	int i;
 
 	VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
 	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
 	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
-	VM_BUG_ON(!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd));
+	VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
+				&& !pmd_devmap(*pmd));
 
 	count_vm_event(THP_SPLIT_PMD);
 
@@ -2025,7 +2069,16 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 		return __split_huge_zero_page_pmd(vma, haddr, pmd);
 	}
 
-	page = pmd_page(*pmd);
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+	pmd_migration = is_pmd_migration_entry(*pmd);
+	if (pmd_migration) {
+		swp_entry_t entry;
+
+		entry = pmd_to_swp_entry(*pmd);
+		page = pfn_to_page(swp_offset(entry));
+	} else
+#endif
+		page = pmd_page(*pmd);
 	VM_BUG_ON_PAGE(!page_count(page), page);
 	page_ref_add(page, HPAGE_PMD_NR - 1);
 	write = pmd_write(*pmd);
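
When the pmd holds a migration entry there is no present mapping for pmd_page() to use, so the split path above digs the pfn out of the entry's offset field instead. A toy model of that round trip, under the same caveats as before (flat fake mem_map, simplified single-field encoding, all names stand-ins):

/*
 * Toy model of page = pfn_to_page(swp_offset(entry)) above: a
 * migration entry carries the pfn in its offset field, so the page
 * is recoverable even though the pmd is not present.
 */
#include <assert.h>
#include <stdio.h>

struct page { unsigned long flags; };

#define NPAGES 64
static struct page mem_map[NPAGES];	/* toy flat memory map */

static struct page *pfn_to_page(unsigned long pfn) { return &mem_map[pfn]; }
static unsigned long page_to_pfn(struct page *p)
{
	return (unsigned long)(p - mem_map);
}

typedef struct { unsigned long val; } swp_entry_t;

/* Offset field only; the migration type bits are omitted in this toy. */
static swp_entry_t make_migration_entry(struct page *p)
{
	swp_entry_t e = { page_to_pfn(p) };
	return e;
}

static unsigned long swp_offset(swp_entry_t e) { return e.val; }

int main(void)
{
	struct page *thp = &mem_map[42];
	swp_entry_t entry = make_migration_entry(thp);

	/* the round trip the split path relies on */
	assert(pfn_to_page(swp_offset(entry)) == thp);
	printf("recovered pfn %lu from the entry\n", swp_offset(entry));
	return 0;
}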
@@ -2044,7 +2097,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 		 * transferred to avoid any possibility of altering
 		 * permissions across VMAs.
 		 */
-		if (freeze) {
+		if (freeze || pmd_migration) {
 			swp_entry_t swp_entry;
 			swp_entry = make_migration_entry(page + i, write);
 			entry = swp_entry_to_pte(swp_entry);
@@ -2143,7 +2196,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 		page = pmd_page(*pmd);
 		if (PageMlocked(page))
 			clear_page_mlock(page);
-	} else if (!pmd_devmap(*pmd))
+	} else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
 		goto out;
 	__split_huge_pmd_locked(vma, pmd, haddr, freeze);
 out: