  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. /*
  3. * S390 version
  4. * Copyright IBM Corp. 1999, 2000
  5. * Author(s): Hartmut Penner (hp@de.ibm.com)
  6. * Martin Schwidefsky (schwidefsky@de.ibm.com)
  7. *
  8. * Derived from "include/asm-i386/pgalloc.h"
  9. * Copyright (C) 1994 Linus Torvalds
  10. */
  11. #ifndef _S390_PGALLOC_H
  12. #define _S390_PGALLOC_H
  13. #include <linux/threads.h>
  14. #include <linux/string.h>
  15. #include <linux/gfp.h>
  16. #include <linux/mm.h>
  17. #define CRST_ALLOC_ORDER 2
  18. unsigned long *crst_table_alloc(struct mm_struct *);
  19. void crst_table_free(struct mm_struct *, unsigned long *);
  20. unsigned long *page_table_alloc(struct mm_struct *);
  21. struct page *page_table_alloc_pgste(struct mm_struct *mm);
  22. void page_table_free(struct mm_struct *, unsigned long *);
  23. void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
  24. void page_table_free_pgste(struct page *page);
  25. extern int page_table_allocate_pgste;
  26. static inline void crst_table_init(unsigned long *crst, unsigned long entry)
  27. {
  28. memset64((u64 *)crst, entry, _CRST_ENTRIES);
  29. }
  30. static inline unsigned long pgd_entry_type(struct mm_struct *mm)
  31. {
  32. if (mm_pmd_folded(mm))
  33. return _SEGMENT_ENTRY_EMPTY;
  34. if (mm_pud_folded(mm))
  35. return _REGION3_ENTRY_EMPTY;
  36. if (mm_p4d_folded(mm))
  37. return _REGION2_ENTRY_EMPTY;
  38. return _REGION1_ENTRY_EMPTY;
  39. }
  40. int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
  41. void crst_table_downgrade(struct mm_struct *);
  42. static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
  43. {
  44. unsigned long *table = crst_table_alloc(mm);
  45. if (table)
  46. crst_table_init(table, _REGION2_ENTRY_EMPTY);
  47. return (p4d_t *) table;
  48. }
  49. #define p4d_free(mm, p4d) crst_table_free(mm, (unsigned long *) p4d)
  50. static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
  51. {
  52. unsigned long *table = crst_table_alloc(mm);
  53. if (table)
  54. crst_table_init(table, _REGION3_ENTRY_EMPTY);
  55. return (pud_t *) table;
  56. }
  57. #define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)
  58. static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
  59. {
  60. unsigned long *table = crst_table_alloc(mm);
  61. if (!table)
  62. return NULL;
  63. crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
  64. if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
  65. crst_table_free(mm, table);
  66. return NULL;
  67. }
  68. return (pmd_t *) table;
  69. }
/*
 * Free a PMD table: undo the page-table-page constructor first,
 * then release the CRST table itself.
 */
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	crst_table_free(mm, (unsigned long *) pmd);
}
/* Link a P4D table into a PGD entry: region-1 flags plus table origin. */
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	pgd_val(*pgd) = _REGION1_ENTRY | __pa(p4d);
}
/* Link a PUD table into a P4D entry: region-2 flags plus table origin. */
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	p4d_val(*p4d) = _REGION2_ENTRY | __pa(pud);
}
/* Link a PMD table into a PUD entry: region-3 flags plus table origin. */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}
  87. static inline pgd_t *pgd_alloc(struct mm_struct *mm)
  88. {
  89. unsigned long *table = crst_table_alloc(mm);
  90. if (!table)
  91. return NULL;
  92. if (mm->context.asce_limit == _REGION3_SIZE) {
  93. /* Forking a compat process with 2 page table levels */
  94. if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
  95. crst_table_free(mm, table);
  96. return NULL;
  97. }
  98. }
  99. return (pgd_t *) table;
  100. }
/*
 * Free the top-level page table of @mm.
 * Mirrors pgd_alloc(): when the ASCE limit is _REGION3_SIZE the page
 * was registered via pgtable_pmd_page_ctor(), so run the dtor first.
 */
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	if (mm->context.asce_limit == _REGION3_SIZE)
		pgtable_pmd_page_dtor(virt_to_page(pgd));
	crst_table_free(mm, (unsigned long *) pgd);
}
/*
 * Link a PTE table into a PMD (segment) entry.
 * NOTE(review): uses '+' where the other *_populate helpers use '|';
 * equivalent only if the flag bits and the table origin never overlap —
 * presumably guaranteed by the page-table alignment, but worth confirming.
 */
static inline void pmd_populate(struct mm_struct *mm,
				pmd_t *pmd, pgtable_t pte)
{
	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
}
/* Kernel and user PMD population are identical on s390. */
#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)
/* Recover the pgtable_t from a pmd value by masking off the low flag bits. */
#define pmd_pgtable(pmd) \
	(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)
/*
 * page table entry allocation/free routines.
 * Kernel and user variants share the same backing allocator.
 */
#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
  122. extern void rcu_table_freelist_finish(void);
  123. void vmem_map_init(void);
  124. void *vmem_crst_alloc(unsigned long val);
  125. pte_t *vmem_pte_alloc(void);
  126. unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages);
  127. void base_asce_free(unsigned long asce);
  128. #endif /* _S390_PGALLOC_H */