@@ -3624,7 +3624,6 @@ retry_avoidcopy:
 	copy_user_huge_page(new_page, old_page, address, vma,
 			    pages_per_huge_page(h));
 	__SetPageUptodate(new_page);
-	set_page_huge_active(new_page);
 
 	mmun_start = haddr;
 	mmun_end = mmun_start + huge_page_size(h);
@@ -3646,6 +3645,7 @@ retry_avoidcopy:
 				make_huge_pte(vma, new_page, 1));
 		page_remove_rmap(old_page, true);
 		hugepage_add_new_anon_rmap(new_page, vma, haddr);
+		set_page_huge_active(new_page);
 		/* Make the old page be freed below */
 		new_page = old_page;
 	}
@@ -3730,6 +3730,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 	pte_t new_pte;
 	spinlock_t *ptl;
 	unsigned long haddr = address & huge_page_mask(h);
+	bool new_page = false;
 
 	/*
 	 * Currently, we are forced to kill the process in the event the
@@ -3791,7 +3792,7 @@ retry:
 		}
 		clear_huge_page(page, address, pages_per_huge_page(h));
 		__SetPageUptodate(page);
-		set_page_huge_active(page);
+		new_page = true;
 
 		if (vma->vm_flags & VM_MAYSHARE) {
 			int err = huge_add_to_page_cache(page, mapping, idx);
@@ -3862,6 +3863,15 @@ retry:
 	}
 
 	spin_unlock(ptl);
+
+	/*
+	 * Only make newly allocated pages active.  Existing pages found
+	 * in the pagecache could be !page_huge_active() if they have been
+	 * isolated for migration.
+	 */
+	if (new_page)
+		set_page_huge_active(page);
+
 	unlock_page(page);
 out:
 	return ret;
@@ -4096,7 +4106,6 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 	 * the set_pte_at() write.
 	 */
 	__SetPageUptodate(page);
-	set_page_huge_active(page);
 
 	mapping = dst_vma->vm_file->f_mapping;
 	idx = vma_hugecache_offset(h, dst_vma, dst_addr);
@@ -4164,6 +4173,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
 
 	spin_unlock(ptl);
+	set_page_huge_active(page);
 	if (vm_shared)
 		unlock_page(page);
 	ret = 0;