/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGALLOC_H
#define _ASM_X86_PGALLOC_H

#include <linux/threads.h>
#include <linux/mm.h>		/* for struct page */
#include <linux/pagemap.h>

static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; }

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#define paravirt_pgd_alloc(mm)	__paravirt_pgd_alloc(mm)
static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {}
static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
					    unsigned long start, unsigned long count) {}
static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_release_pte(unsigned long pfn) {}
static inline void paravirt_release_pmd(unsigned long pfn) {}
static inline void paravirt_release_pud(unsigned long pfn) {}
static inline void paravirt_release_p4d(unsigned long pfn) {}
#endif
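
/*
 * When CONFIG_PARAVIRT_XXL is off, the paravirt_* hooks above are empty
 * static inlines that the compiler optimizes away entirely; a
 * paravirtualized (Xen PV) kernel pulls in real implementations from
 * <asm/paravirt.h> instead.
 */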

/*
 * Flags to use when allocating a user page table page.
 */
extern gfp_t __userpte_alloc_gfp;

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Instead of one PGD, we acquire two PGDs.  Being order-1, it is
 * both 8k in size and 8k-aligned.  That lets us just flip bit 12
 * in a pointer to swap between the two 4k halves.
 */
#define PGD_ALLOCATION_ORDER 1
#else
#define PGD_ALLOCATION_ORDER 0
#endif
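
/*
 * Illustration (not part of the original header): since the kernel and
 * user halves of a PTI pgd pair differ only in bit 12 of their address,
 * switching between them is a single XOR, roughly:
 *
 *	user_pgd = (pgd_t *)((unsigned long)kernel_pgd ^ PAGE_SIZE);
 *
 * The real helpers for this live in <asm/pgtable.h>.
 */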

/*
 * Allocate and free page tables.
 */
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);

/* Should really implement gc for free page table pages. This could be
   done with a reference count in struct page. */

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
	free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
	pgtable_page_dtor(pte);
	__free_page(pte);
}
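
/*
 * The __*_free_tlb() helpers below hand page-table pages to the
 * mmu_gather machinery, which defers the actual free until the TLB has
 * been flushed, so no CPU can still be walking the stale table.
 */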

extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte);

static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
				  unsigned long address)
{
	___pte_free_tlb(tlb, pte);
}
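
/*
 * The pmd_populate*() helpers wire a freshly allocated pte page into
 * its parent pmd entry: the entry holds the physical address of the
 * pte page plus the _PAGE_TABLE permission bits (present, writable,
 * user-accessible, accessed, dirty).
 */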
static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *pte)
{
	paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
	set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				struct page *pte)
{
	unsigned long pfn = page_to_pfn(pte);

	paravirt_alloc_pte(mm, pfn);
	set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
}

#define pmd_pgtable(pmd) pmd_page(pmd)

#if CONFIG_PGTABLE_LEVELS > 2
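/*
 * User page-table pages are allocated with GFP_KERNEL_ACCOUNT so they
 * are charged to the allocating task's memory cgroup; tables for the
 * kernel's own init_mm are exempted by clearing __GFP_ACCOUNT.
 */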
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	struct page *page;
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	page = alloc_pages(gfp, 0);
	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {
		__free_pages(page, 0);
		return NULL;
	}
	return (pmd_t *)page_address(page);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	free_page((unsigned long)pmd);
}

extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long address)
{
	___pmd_free_tlb(tlb, pmd);
}
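
/*
 * On PAE the top level is the 4-entry PDPT, whose entries the CPU may
 * cache when %cr3 is loaded, so pud_populate() needs the out-of-line
 * implementation in arch/x86/mm/pgtable.c; without PAE a plain inline
 * store is enough.
 */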
#ifdef CONFIG_X86_PAE
extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
#else	/* !CONFIG_X86_PAE */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
	set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
}
#endif	/* CONFIG_X86_PAE */

#if CONFIG_PGTABLE_LEVELS > 3
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
	set_p4d(p4d, __p4d(_PAGE_TABLE | __pa(pud)));
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	gfp_t gfp = GFP_KERNEL_ACCOUNT;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	return (pud_t *)get_zeroed_page(gfp);
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
	free_page((unsigned long)pud);
}

extern void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);

static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long address)
{
	___pud_free_tlb(tlb, pud);
}

#if CONFIG_PGTABLE_LEVELS > 4
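/*
 * With CONFIG_PGTABLE_LEVELS > 4 the actual number of paging levels is
 * still chosen at boot, so the helpers below check pgtable_l5_enabled()
 * at run time: on a machine running with 4-level tables the p4d level
 * folds into the pgd, and populating or freeing it becomes a no-op.
 */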
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	if (!pgtable_l5_enabled())
		return;
	paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
	set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
}

static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	gfp_t gfp = GFP_KERNEL_ACCOUNT;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	return (p4d_t *)get_zeroed_page(gfp);
}

static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	if (!pgtable_l5_enabled())
		return;

	BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
	free_page((unsigned long)p4d);
}

extern void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d);

static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
				  unsigned long address)
{
	if (pgtable_l5_enabled())
		___p4d_free_tlb(tlb, p4d);
}

#endif	/* CONFIG_PGTABLE_LEVELS > 4 */
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#endif	/* _ASM_X86_PGALLOC_H */