pgalloc.h

/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994 Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/gfp.h>
#include <linux/mm.h>
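
/*
 * Region and segment ("crst") tables hold 2048 eight-byte entries,
 * i.e. 16KB or four pages, hence the order-2 allocation below.
 */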
#define CRST_ALLOC_ORDER 2

unsigned long *crst_table_alloc(struct mm_struct *);
void crst_table_free(struct mm_struct *, unsigned long *);
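
/*
 * pte-level table allocation; the *_pgste variants presumably cover the
 * case where page status table entries (pgste) are needed, e.g. for
 * KVM guests.
 */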
unsigned long *page_table_alloc(struct mm_struct *);
struct page *page_table_alloc_pgste(struct mm_struct *mm);
void page_table_free(struct mm_struct *, unsigned long *);
void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
void page_table_free_pgste(struct page *page);
extern int page_table_allocate_pgste;
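
/*
 * Initialize a table: store the first entry, then let the overlapping
 * MVC propagate it. The copy from offset 0 to offset 8 proceeds byte by
 * byte, so the first 8-byte entry is replicated across each 256-byte
 * block.
 */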
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
	struct addrtype { char _[256]; };
	int i;

	for (i = 0; i < n; i += 256) {
		*s = val;
		asm volatile(
			"mvc	8(248,%[s]),0(%[s])\n"
			: "+m" (*(struct addrtype *) s)
			: [s] "a" (s));
		s += 256 / sizeof(long);
	}
}

static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
	clear_table(crst, entry, _CRST_TABLE_SIZE);
}
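
/*
 * The type of the empty entries in the top-level table depends on how
 * many translation levels the address space uses, which is derived from
 * mm->context.asce_limit.
 */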
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	if (mm->context.asce_limit <= _REGION3_SIZE)
		return _SEGMENT_ENTRY_EMPTY;
	if (mm->context.asce_limit <= _REGION2_SIZE)
		return _REGION3_ENTRY_EMPTY;
	if (mm->context.asce_limit <= _REGION1_SIZE)
		return _REGION2_ENTRY_EMPTY;
	return _REGION1_ENTRY_EMPTY;
}
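
/*
 * Switch an mm to more or fewer crst table levels, e.g. when a mapping
 * beyond the current asce_limit is requested.
 */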
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
void crst_table_downgrade(struct mm_struct *);

static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm);

	if (table)
		crst_table_init(table, _REGION2_ENTRY_EMPTY);
	return (p4d_t *) table;
}

#define p4d_free(mm, p4d) crst_table_free(mm, (unsigned long *) p4d)

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm);

	if (table)
		crst_table_init(table, _REGION3_ENTRY_EMPTY);
	return (pud_t *) table;
}

#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)
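
/*
 * pmd tables additionally go through pgtable_pmd_page_ctor()/_dtor(),
 * which set up the per-page state used for split page table locks.
 */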
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *table = crst_table_alloc(mm);

	if (!table)
		return NULL;
	crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
	if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
		crst_table_free(mm, table);
		return NULL;
	}
	return (pmd_t *) table;
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	crst_table_free(mm, (unsigned long *) pmd);
}
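
/*
 * The *_populate() helpers store the physical address of the next lower
 * table together with the region/segment type bits of the current level.
 */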
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	pgd_val(*pgd) = _REGION1_ENTRY | __pa(p4d);
}

static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	p4d_val(*p4d) = _REGION2_ENTRY | __pa(pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	unsigned long *table = crst_table_alloc(mm);

	if (!table)
		return NULL;
	if (mm->context.asce_limit == _REGION3_SIZE) {
		/* Forking a compat process with 2 page table levels */
		if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
			crst_table_free(mm, table);
			return NULL;
		}
	}
	return (pgd_t *) table;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	if (mm->context.asce_limit == _REGION3_SIZE)
		pgtable_pmd_page_dtor(virt_to_page(pgd));
	crst_table_free(mm, (unsigned long *) pgd);
}

static inline void pmd_populate(struct mm_struct *mm,
				pmd_t *pmd, pgtable_t pte)
{
	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
}

#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)
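
/*
 * pmd_pgtable() masks the segment-table entry down to the page table
 * origin; -sizeof(pte_t)*PTRS_PER_PTE is (minus) the size of a page
 * table, so the mask clears exactly the low-order type/protection bits.
 */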
#define pmd_pgtable(pmd) \
	(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)

/*
 * page table entry allocation/free routines.
 */
#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))

#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)

extern void rcu_table_freelist_finish(void);

void vmem_map_init(void);
void *vmem_crst_alloc(unsigned long val);
pte_t *vmem_pte_alloc(void);

#endif /* _S390_PGALLOC_H */