@@ -84,6 +84,18 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 
 #define pmd_pgtable(pmd) pmd_page(pmd)
 
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
+			pgtable_gfp_flags(mm, GFP_KERNEL));
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+	kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
+}
+
+
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					  unsigned long address)
 {
@@ -118,26 +130,47 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 	__free_page(ptepage);
 }
 
-extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
+static inline void pgtable_free(void *table, int shift)
+{
+	if (!shift) {
+		pgtable_page_dtor(virt_to_page(table));
+		free_page((unsigned long)table);
+	} else {
+		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+		kmem_cache_free(PGT_CACHE(shift), table);
+	}
+}
+
 #ifdef CONFIG_SMP
-extern void __tlb_remove_table(void *_table);
-#endif
-static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
-				  unsigned long address)
+static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
 {
-	tlb_flush_pgtable(tlb, address);
-	pgtable_free_tlb(tlb, page_address(table), 0);
+	unsigned long pgf = (unsigned long)table;
+
+	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+	pgf |= shift;
+	tlb_remove_table(tlb, (void *)pgf);
 }
 
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+static inline void __tlb_remove_table(void *_table)
 {
-	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
-			pgtable_gfp_flags(mm, GFP_KERNEL));
+	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
+	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
+
+	pgtable_free(table, shift);
 }
 
-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+#else
+static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
 {
-	kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
+	pgtable_free(table, shift);
+}
+#endif
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
+				  unsigned long address)
+{
+	tlb_flush_pgtable(tlb, address);
+	pgtable_free_tlb(tlb, page_address(table), 0);
 }
 
 #define __pmd_free_tlb(tlb, pmd, addr)	\
|