/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_PGALLOC_H
#define _ASM_POWERPC_BOOK3S_32_PGALLOC_H

#include <linux/threads.h>
#include <linux/slab.h>

/*
 * Functions that deal with pagetables that could be at any level of
 * the table need to be passed an "index_size" so they know how to
 * handle allocation.  For PTE pages (which are linked to a struct
 * page for now, and drawn from the main get_free_pages() pool), the
 * allocation size will be (2^index_size * sizeof(pointer)) and
 * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
 *
 * The maximum index size needs to be big enough to allow any
 * pagetable sizes we need, but small enough to fit in the low bits of
 * any page table pointer.  In other words all pagetables, even tiny
 * ones, must be aligned to allow at least enough low 0 bits to
 * contain this value.  This value is also used as a mask, so it must
 * be one less than a power of two.
 */
#define MAX_PGTABLE_INDEX_SIZE	0xf
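
/*
 * The value above is used both as a limit and as a mask: a table's
 * index_size is stashed in the low bits of its (16-byte-or-better
 * aligned) pointer, i.e.
 *
 *	encoded    = (unsigned long)table | index_size;
 *	table      = (void *)(encoded & ~MAX_PGTABLE_INDEX_SIZE);
 *	index_size = encoded & MAX_PGTABLE_INDEX_SIZE;
 *
 * which is exactly what pgtable_free_tlb() and __tlb_remove_table()
 * below do.
 */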

extern void __bad_pte(pmd_t *pmd);

extern struct kmem_cache *pgtable_cache[];
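/* Look up the kmem_cache for an index size; pgtable_cache[] is indexed
 * by (shift - 1), so a shift of 0 is invalid. */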
#define PGT_CACHE(shift) ({				\
			BUG_ON(!(shift));		\
			pgtable_cache[(shift) - 1];	\
		})
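
/* PGDs are carved from the kmem_cache registered for PGD_INDEX_SIZE. */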
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
			pgtable_gfp_flags(mm, GFP_KERNEL));
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}

/*
 * We don't have any real pmds, and this code never triggers because
 * the pgd will always be present.
 */
/* #define pmd_alloc_one(mm, address)	({ BUG(); ((pmd_t *)2); }) */
#define pmd_free(mm, x)			do { } while (0)
#define __pmd_free_tlb(tlb, x, a)	do { } while (0)
/* #define pgd_populate(mm, pmd, pte)	BUG() */

#ifndef CONFIG_BOOKE
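/*
 * Hash (non-BookE) flavour: the PMD entry holds the *physical*
 * address of the PTE page.
 */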
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
				       pte_t *pte)
{
	*pmdp = __pmd(__pa(pte) | _PMD_PRESENT);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pte_page)
{
	*pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_PRESENT);
}

#define pmd_pgtable(pmd) pmd_page(pmd)
#else
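/*
 * BookE flavour: the PMD entry holds the kernel *virtual* address of
 * the PTE page instead.
 */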
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
				       pte_t *pte)
{
	*pmdp = __pmd((unsigned long)pte | _PMD_PRESENT);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pte_page)
{
	*pmdp = __pmd((unsigned long)lowmem_page_address(pte_page) | _PMD_PRESENT);
}

#define pmd_pgtable(pmd) pmd_page(pmd)
#endif

extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
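
/*
 * Kernel PTE pages are bare pages, which is why pte_free_kernel()
 * skips the destructor that pte_free() runs; the dtor presumably
 * pairs with a pgtable_page_ctor() in pte_alloc_one(), defined
 * out of line.
 */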
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pgtable_page_dtor(ptepage);
	__free_page(ptepage);
}
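
/*
 * Free a page table at any level: index_size == 0 means a PTE page
 * (a full page with a struct page destructor), anything else is a
 * kmem_cache object whose cache is selected by the index size.
 */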
static inline void pgtable_free(void *table, unsigned index_size)
{
	if (!index_size) {
		pgtable_page_dtor(virt_to_page(table));
		free_page((unsigned long)table);
	} else {
		BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(index_size), table);
	}
}

#define check_pgt_cache()		do { } while (0)
#define get_hugepd_cache_index(x)	(x)

#ifdef CONFIG_SMP
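/*
 * On SMP another CPU may still be mid-walk through the table being
 * freed, so the free is deferred: the index size is packed into the
 * pointer's low bits and tlb_remove_table() hands the encoded value
 * to __tlb_remove_table() once it is safe to release the memory.
 */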
static inline void pgtable_free_tlb(struct mmu_gather *tlb,
				    void *table, int shift)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
	pgf |= shift;
	tlb_remove_table(tlb, (void *)pgf);
}
static inline void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	pgtable_free(table, shift);
}
#else
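/* On UP no other CPU can be walking the table; free it immediately. */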
static inline void pgtable_free_tlb(struct mmu_gather *tlb,
				    void *table, int shift)
{
	pgtable_free(table, shift);
}
#endif
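
/* PTE pages always take the "index size 0" path of pgtable_free(). */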
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
{
	pgtable_free_tlb(tlb, page_address(table), 0);
}

#endif /* _ASM_POWERPC_BOOK3S_32_PGALLOC_H */