@@ -3906,6 +3906,29 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 }
 EXPORT_SYMBOL_GPL(handle_mm_fault);
 
+#ifndef __PAGETABLE_P4D_FOLDED
+/*
+ * Allocate p4d page table.
+ * We've already handled the fast-path in-line.
+ */
+int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+{
+	p4d_t *new = p4d_alloc_one(mm, address);
+	if (!new)
+		return -ENOMEM;
+
+	smp_wmb(); /* See comment in __pte_alloc */
+
+	spin_lock(&mm->page_table_lock);
+	if (pgd_present(*pgd))		/* Another has populated it */
+		p4d_free(mm, new);
+	else
+		pgd_populate(mm, pgd, new);
+	spin_unlock(&mm->page_table_lock);
+	return 0;
+}
+#endif /* __PAGETABLE_P4D_FOLDED */
+
 #ifndef __PAGETABLE_PUD_FOLDED
 /*
  * Allocate page upper directory.