@@ -783,6 +783,12 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
 
 	assert_spin_locked(pmd_lockptr(mm, pmd));
 
+	/*
+	 * When we COW a devmap PMD entry, we split it into PTEs, so we should
+	 * not be in this function with `flags & FOLL_COW` set.
+	 */
+	WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
+
 	if (flags & FOLL_WRITE && !pmd_write(*pmd))
 		return NULL;
 
@@ -1128,6 +1134,16 @@ out_unlock:
 	return ret;
 }
 
+/*
+ * FOLL_FORCE can write to even unwritable pmd's, but only
+ * after we've gone through a COW cycle and they are dirty.
+ */
+static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
+{
+	return pmd_write(pmd) ||
+	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
+}
+
 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 				   unsigned long addr,
 				   pmd_t *pmd,
@@ -1138,7 +1154,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 
 	assert_spin_locked(pmd_lockptr(mm, pmd));
 
-	if (flags & FOLL_WRITE && !pmd_write(*pmd))
+	if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
 		goto out;
 
 	/* Avoid dumping huge zero page */