@@ -869,6 +869,49 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 					flags);
 }
 
+static int insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
+		pmd_t *pmd, unsigned long pfn, pgprot_t prot, bool write)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	pmd_t entry;
+	spinlock_t *ptl;
+
+	ptl = pmd_lock(mm, pmd);
+	if (pmd_none(*pmd)) {
+		entry = pmd_mkhuge(pfn_pmd(pfn, prot));
+		if (write) {
+			entry = pmd_mkyoung(pmd_mkdirty(entry));
+			entry = maybe_pmd_mkwrite(entry, vma);
+		}
+		set_pmd_at(mm, addr, pmd, entry);
+		update_mmu_cache_pmd(vma, addr, pmd);
+	}
+	spin_unlock(ptl);
+	return VM_FAULT_NOPAGE;
+}
+
+int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
+		pmd_t *pmd, unsigned long pfn, bool write)
+{
+	pgprot_t pgprot = vma->vm_page_prot;
+	/*
+	 * If we had pmd_special, we could avoid all these restrictions,
+	 * but we need to be consistent with PTEs and architectures that
+	 * can't support a 'special' bit.
+	 */
+	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
+	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
+						(VM_PFNMAP|VM_MIXEDMAP));
+	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
+	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
+
+	if (addr < vma->vm_start || addr >= vma->vm_end)
+		return VM_FAULT_SIGBUS;
+	if (track_pfn_insert(vma, &pgprot, pfn))
+		return VM_FAULT_SIGBUS;
+	return insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write);
+}
+
 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
 		  struct vm_area_struct *vma)
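
For context, a minimal sketch of how a caller might use the new helper follows. It assumes a driver that maps a physically contiguous, PMD-aligned device region with VM_PFNMAP, and it assumes the ->pmd_fault hook added elsewhere in this series; my_dev_pmd_fault(), mydev_base_pfn(), and my_dev_vm_ops are invented names for illustration, not part of this patch:

/* Hypothetical usage sketch -- not part of the patch above. */
#include <linux/mm.h>
#include <linux/huge_mm.h>

/*
 * Invented helper: returns the first PFN of the device region backing
 * this VMA. Assumes the region is physically contiguous and PMD-aligned,
 * so the PFN computed below is PMD-aligned as well.
 */
static unsigned long mydev_base_pfn(struct vm_area_struct *vma);

static int my_dev_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, unsigned int flags)
{
	unsigned long haddr = addr & PMD_MASK;
	unsigned long pfn;

	/* Fall back to PTEs unless a full, aligned PMD fits in the VMA. */
	if (haddr < vma->vm_start || haddr + PMD_SIZE > vma->vm_end)
		return VM_FAULT_FALLBACK;

	pfn = mydev_base_pfn(vma) + ((haddr - vma->vm_start) >> PAGE_SHIFT);
	return vmf_insert_pfn_pmd(vma, haddr, pmd, pfn,
				  !!(flags & FAULT_FLAG_WRITE));
}

static const struct vm_operations_struct my_dev_vm_ops = {
	.pmd_fault	= my_dev_pmd_fault,
};

Returning VM_FAULT_FALLBACK when the PMD-sized range does not fit lets the core fault path retry with regular PTEs, which is the same general pattern the DAX callers of this helper follow.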