@@ -271,12 +271,87 @@ static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
 	}
 }
 
+/*
+ * Xen paravirt assumes that the pgd table is in one page. The 64-bit
+ * kernel also assumes that the pgd is in one page.
+ *
+ * But a kernel with PAE paging that is not running as a Xen domain
+ * only needs to allocate 32 bytes for the pgd, instead of one page.
+ */
+#ifdef CONFIG_X86_PAE
+
+#include <linux/slab.h>
+
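+/*
+ * With PAE, the top-level pgd table has PTRS_PER_PGD == 4 entries of
+ * sizeof(pgd_t) == 8 bytes each, i.e. 32 bytes in total, and the
+ * hardware requires that table to be 32-byte aligned.
+ */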
+#define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
+#define PGD_ALIGN	32
+
+static struct kmem_cache *pgd_cache;
+
+static int __init pgd_cache_init(void)
+{
+	/*
+	 * When a PAE kernel runs as a Xen domain, it does not use a
+	 * shared kernel pmd, and that requires a whole page for the pgd.
+	 */
+	if (!SHARED_KERNEL_PMD)
+		return 0;
+
+	/*
+	 * When a PAE kernel is not running as a Xen domain, it uses a
+	 * shared kernel pmd, which does not require a whole page for the
+	 * pgd: 32 bytes are enough. Create a 32-byte slab for pgd table
+	 * allocations at boot time.
+	 */
+	pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
+				      SLAB_PANIC, NULL);
+	if (!pgd_cache)
+		return -ENOMEM;
+
+	return 0;
+}
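+/*
+ * pgd_cache must exist before the first user pgd is allocated; a
+ * core_initcall runs early in boot, well before user space starts.
+ */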
+core_initcall(pgd_cache_init);
+
+static inline pgd_t *_pgd_alloc(void)
+{
+	/*
+	 * If SHARED_KERNEL_PMD is not set, the PAE kernel is running as
+	 * a Xen domain and the pgd needs one whole page.
+	 */
+	if (!SHARED_KERNEL_PMD)
+		return (pgd_t *)__get_free_page(PGALLOC_GFP);
+
+	/*
+	 * Otherwise the PAE kernel is not running as a Xen domain;
+	 * allocate the pgd from the 32-byte slab to save memory.
+	 * PGALLOC_GFP includes __GFP_ZERO, so the slab allocation is
+	 * zeroed just like the page above.
+	 */
+	return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
+}
+
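+/*
+ * Free the pgd the same way it was allocated: SHARED_KERNEL_PMD does
+ * not change after early boot, so the check below always matches the
+ * decision made in _pgd_alloc().
+ */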
+static inline void _pgd_free(pgd_t *pgd)
+{
+	if (!SHARED_KERNEL_PMD)
+		free_page((unsigned long)pgd);
+	else
+		kmem_cache_free(pgd_cache, pgd);
+}
+#else
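+/* Without PAE (including 64-bit kernels), the pgd is always one page. */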
+static inline pgd_t *_pgd_alloc(void)
+{
+	return (pgd_t *)__get_free_page(PGALLOC_GFP);
+}
+
+static inline void _pgd_free(pgd_t *pgd)
+{
+	free_page((unsigned long)pgd);
+}
+#endif /* CONFIG_X86_PAE */
+
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	pgd_t *pgd;
 	pmd_t *pmds[PREALLOCATED_PMDS];
 
-	pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
+	pgd = _pgd_alloc();
 
 	if (pgd == NULL)
 		goto out;
@@ -306,7 +381,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 out_free_pmds:
 	free_pmds(pmds);
 out_free_pgd:
-	free_page((unsigned long)pgd);
+	_pgd_free(pgd);
 out:
 	return NULL;
 }
@@ -316,7 +391,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 	pgd_mop_up_pmds(mm, pgd);
 	pgd_dtor(pgd);
 	paravirt_pgd_free(mm, pgd);
-	free_page((unsigned long)pgd);
+	_pgd_free(pgd);
 }
 
 /*