@@ -23,6 +23,7 @@
 #include <linux/freezer.h>
 #include <linux/pfn_t.h>
 #include <linux/mman.h>
+#include <linux/memremap.h>
 #include <linux/pagemap.h>
 #include <linux/debugfs.h>
 #include <linux/migrate.h>
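The new <linux/memremap.h> include is needed because follow_devmap_pmd() below uses struct dev_pagemap and the get_dev_pagemap()/put_dev_pagemap() helpers, which are declared in that header.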
@@ -974,6 +975,63 @@ int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
 	return VM_FAULT_NOPAGE;
 }
 
+static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
+		pmd_t *pmd)
+{
+	pmd_t _pmd;
+
+	/*
+	 * We should set the dirty bit only for FOLL_WRITE but for now
+	 * the dirty bit in the pmd is meaningless.  And if the dirty
+	 * bit will become meaningful and we'll only set it with
+	 * FOLL_WRITE, an atomic set_bit will be required on the pmd to
+	 * set the young bit, instead of the current set_pmd_at.
+	 */
+	_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
+	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
+				pmd, _pmd, 1))
+		update_mmu_cache_pmd(vma, addr, pmd);
+}
+
+struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
+		pmd_t *pmd, int flags)
+{
+	unsigned long pfn = pmd_pfn(*pmd);
+	struct mm_struct *mm = vma->vm_mm;
+	struct dev_pagemap *pgmap;
+	struct page *page;
+
+	assert_spin_locked(pmd_lockptr(mm, pmd));
+
+	if (flags & FOLL_WRITE && !pmd_write(*pmd))
+		return NULL;
+
+	if (pmd_present(*pmd) && pmd_devmap(*pmd))
+		/* pass */;
+	else
+		return NULL;
+
+	if (flags & FOLL_TOUCH)
+		touch_pmd(vma, addr, pmd);
+
+	/*
+	 * device mapped pages can only be returned if the
+	 * caller will manage the page reference count.
+	 */
+	if (!(flags & FOLL_GET))
+		return ERR_PTR(-EEXIST);
+
+	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
+	pgmap = get_dev_pagemap(pfn, NULL);
+	if (!pgmap)
+		return ERR_PTR(-EFAULT);
+	page = pfn_to_page(pfn);
+	get_page(page);
+	put_dev_pagemap(pgmap);
+
+	return page;
+}
+
 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
 		  struct vm_area_struct *vma)
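follow_devmap_pmd() asserts that the pmd lock is held, and without FOLL_GET it returns ERR_PTR(-EEXIST) so that callers unwilling to manage a page reference bail out. A minimal sketch of the expected caller pattern, modeled on follow_page_mask() in mm/gup.c; the locals ptl, page, mm, vma, address, and flags are assumed from that surrounding function and not shown here:

	if (pmd_devmap(*pmd)) {
		ptl = pmd_lock(mm, pmd);	/* satisfies the assert above */
		page = follow_devmap_pmd(vma, address, pmd, flags);
		spin_unlock(ptl);
		if (page)
			return page;	/* page or ERR_PTR(); NULL falls through */
	}

On success the returned page carries a reference taken via get_page() under the pmd lock, while the dev_pagemap reference is only held transiently to validate that the pfn still belongs to a live device mapping.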
@@ -1331,21 +1389,8 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 
 	page = pmd_page(*pmd);
 	VM_BUG_ON_PAGE(!PageHead(page), page);
-	if (flags & FOLL_TOUCH) {
-		pmd_t _pmd;
-		/*
-		 * We should set the dirty bit only for FOLL_WRITE but
-		 * for now the dirty bit in the pmd is meaningless.
-		 * And if the dirty bit will become meaningful and
-		 * we'll only set it with FOLL_WRITE, an atomic
-		 * set_bit will be required on the pmd to set the
-		 * young bit, instead of the current set_pmd_at.
-		 */
-		_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
-		if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
-					pmd, _pmd, 1))
-			update_mmu_cache_pmd(vma, addr, pmd);
-	}
+	if (flags & FOLL_TOUCH)
+		touch_pmd(vma, addr, pmd);
 	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
 		/*
 		 * We don't mlock() pte-mapped THPs.  This way we can avoid
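This last hunk is a pure refactor: the open-coded FOLL_TOUCH block in follow_trans_huge_pmd() is replaced by a call to the new touch_pmd() helper, so the THP and device-mapped paths share one implementation of the young/dirty-bit update with no intended change in behavior.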