@@ -2325,6 +2325,16 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
 	if (dst_vma->vm_flags & VM_WRITE)
 		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
+	else {
+		/*
+		 * We don't set the pte dirty if the vma has no
+		 * VM_WRITE permission, so mark the page dirty or it
+		 * could be freed from under us. We could do it
+		 * unconditionally before unlock_page(), but doing it
+		 * only if VM_WRITE is not set is faster.
+		 */
+		set_page_dirty(page);
+	}
 
 	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
@@ -2358,6 +2368,7 @@ out:
 	return ret;
 out_release_uncharge_unlock:
 	pte_unmap_unlock(dst_pte, ptl);
+	ClearPageDirty(page);
 	delete_from_page_cache(page);
 out_release_uncharge:
 	mem_cgroup_cancel_charge(page, memcg, false);