@@ -41,7 +41,7 @@ extern struct kmem_cache *pgtable_cache[];
 			pgtable_cache[(shift) - 1];	\
 		})
 
-#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
+#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO
 
 extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
 extern void pte_fragment_free(unsigned long *, int);
@@ -56,7 +56,7 @@ static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
 	return (pgd_t *)__get_free_page(PGALLOC_GFP);
 #else
 	struct page *page;
-	page = alloc_pages(PGALLOC_GFP, 4);
+	page = alloc_pages(PGALLOC_GFP | __GFP_REPEAT, 4);
 	if (!page)
 		return NULL;
 	return (pgd_t *) page_address(page);
@@ -93,8 +93,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
-				GFP_KERNEL|__GFP_REPEAT);
+	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -115,8 +114,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
-				GFP_KERNEL|__GFP_REPEAT);
+	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)