@@ -1260,6 +1260,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	int target_nid, last_cpupid = -1;
 	bool page_locked;
 	bool migrated = false;
+	bool was_writable;
 	int flags = 0;
 
 	/* A PROT_NONE fault should not end up here */
@@ -1354,7 +1355,10 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto out;
 clear_pmdnuma:
 	BUG_ON(!PageLocked(page));
+	was_writable = pmd_write(pmd);
 	pmd = pmd_modify(pmd, vma->vm_page_prot);
+	if (was_writable)
+		pmd = pmd_mkwrite(pmd);
 	set_pmd_at(mm, haddr, pmdp, pmd);
 	update_mmu_cache_pmd(vma, addr, pmdp);
 	unlock_page(page);
@@ -1478,6 +1482,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 
 	if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
 		pmd_t entry;
+		bool preserve_write = prot_numa && pmd_write(*pmd);
 		ret = 1;
 
 		/*
@@ -1493,9 +1498,11 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 		if (!prot_numa || !pmd_protnone(*pmd)) {
 			entry = pmdp_get_and_clear_notify(mm, addr, pmd);
 			entry = pmd_modify(entry, newprot);
+			if (preserve_write)
+				entry = pmd_mkwrite(entry);
 			ret = HPAGE_PMD_NR;
 			set_pmd_at(mm, addr, pmd, entry);
-			BUG_ON(pmd_write(entry));
+			BUG_ON(!preserve_write && pmd_write(entry));
 		}
 		spin_unlock(ptl);
 	}