/*
 *  IBM System z Huge TLB Page Support for Kernel.
 *
 *    Copyright IBM Corp. 2007
 *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>
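
/*
 * On s390, a huge pte lives in a segment-table (pmd) entry, which uses
 * a different bit layout than a page-table entry. The two helpers below
 * translate between the software pte encoding and the segment-entry
 * encoding; the tables in their comments list the supported encodings.
 */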
static inline pmd_t __pte_to_pmd(pte_t pte)
{
        int none, young, prot;
        pmd_t pmd;

        /*
         * Convert encoding               pte bits         pmd bits
         *                                .IR...wrdytp     ..R...I...y.
         * empty                          .10...000000 ->  ..0...1...0.
         * prot-none, clean, old          .11...000001 ->  ..0...1...1.
         * prot-none, clean, young        .11...000101 ->  ..1...1...1.
         * prot-none, dirty, old          .10...001001 ->  ..0...1...1.
         * prot-none, dirty, young        .10...001101 ->  ..1...1...1.
         * read-only, clean, old          .11...010001 ->  ..1...1...0.
         * read-only, clean, young        .01...010101 ->  ..1...0...1.
         * read-only, dirty, old          .11...011001 ->  ..1...1...0.
         * read-only, dirty, young        .01...011101 ->  ..1...0...1.
         * read-write, clean, old         .11...110001 ->  ..0...1...0.
         * read-write, clean, young       .01...110101 ->  ..0...0...1.
         * read-write, dirty, old         .10...111001 ->  ..0...1...0.
         * read-write, dirty, young       .00...111101 ->  ..0...0...1.
         * Huge ptes are dirty by definition, a clean pte is made dirty
         * by the conversion.
         */
        if (pte_present(pte)) {
                pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
                if (pte_val(pte) & _PAGE_INVALID)
                        pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
                none = (pte_val(pte) & _PAGE_PRESENT) &&
                        !(pte_val(pte) & _PAGE_READ) &&
                        !(pte_val(pte) & _PAGE_WRITE);
                prot = (pte_val(pte) & _PAGE_PROTECT) &&
                        !(pte_val(pte) & _PAGE_WRITE);
                young = pte_val(pte) & _PAGE_YOUNG;
                if (none || young)
                        pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
                if (prot || (none && young))
                        pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
        } else
                pmd_val(pmd) = _SEGMENT_ENTRY_INVALID;
        return pmd;
}
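
/*
 * Reverse of __pte_to_pmd(): reconstruct a software pte value from a
 * segment-table entry. _PAGE_DIRTY is always set because huge ptes
 * are dirty by definition.
 */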
static inline pte_t __pmd_to_pte(pmd_t pmd)
{
        pte_t pte;

        /*
         * Convert encoding         pmd bits          pte bits
         *                          ..R...I...y.      .IR...wrdytp
         * empty                    ..0...1...0. ->   .10...000000
         * prot-none, old           ..0...1...1. ->   .10...001001
         * prot-none, young         ..1...1...1. ->   .10...001101
         * read-only, old           ..1...1...0. ->   .11...011001
         * read-only, young         ..1...0...1. ->   .01...011101
         * read-write, old          ..0...1...0. ->   .10...111001
         * read-write, young        ..0...0...1. ->   .00...111101
         * Huge ptes are dirty by definition
         */
        if (pmd_present(pmd)) {
                pte_val(pte) = _PAGE_PRESENT | _PAGE_LARGE | _PAGE_DIRTY |
                        (pmd_val(pmd) & PAGE_MASK);
                if (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID)
                        pte_val(pte) |= _PAGE_INVALID;
                if (pmd_prot_none(pmd)) {
                        if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
                                pte_val(pte) |= _PAGE_YOUNG;
                } else {
                        pte_val(pte) |= _PAGE_READ;
                        if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
                                pte_val(pte) |= _PAGE_PROTECT;
                        else
                                pte_val(pte) |= _PAGE_WRITE;
                        if (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG)
                                pte_val(pte) |= _PAGE_YOUNG;
                }
        } else
                pte_val(pte) = _PAGE_INVALID;
        return pte;
}
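
/*
 * Install a huge pte, converting it to segment-entry format first.
 * With hardware large pages (MACHINE_HAS_HPAGE) the entry is marked
 * as a large segment; without them, the entry origin is pointed at
 * the page table that arch_prepare_hugepage() stashed in
 * page[1].index.
 */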
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t pte)
{
        pmd_t pmd;

        pmd = __pte_to_pmd(pte);
        if (!MACHINE_HAS_HPAGE) {
                pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
                pmd_val(pmd) |= pte_page(pte)[1].index;
        } else
                pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO;
        *(pmd_t *) ptep = pmd;
}
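
/*
 * Read a huge pte. Without hardware large pages the segment-entry
 * origin points to the software page table, so the first pte of that
 * table, which holds the origin of the huge page, is merged back in
 * before the entry is converted to pte format.
 */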
pte_t huge_ptep_get(pte_t *ptep)
{
        unsigned long origin;
        pmd_t pmd;

        pmd = *(pmd_t *) ptep;
        if (!MACHINE_HAS_HPAGE && pmd_present(pmd)) {
                origin = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
                pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
                pmd_val(pmd) |= *(unsigned long *) origin;
        }
        return __pmd_to_pte(pmd);
}
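
/*
 * Read a huge pte and clear the underlying segment entry, flushing it
 * from the TLB before the entry is marked empty.
 */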
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                              unsigned long addr, pte_t *ptep)
{
        pmd_t *pmdp = (pmd_t *) ptep;
        pte_t pte = huge_ptep_get(ptep);

        pmdp_flush_direct(mm, addr, pmdp);
        pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
        return pte;
}
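
/*
 * Prepare a page for use as a huge page. Hardware large pages need no
 * preparation. For the software emulation, allocate a page table that
 * maps the huge page with PTRS_PER_PTE consecutive ptes and remember
 * it in page[1].index for set_huge_pte_at() and huge_ptep_get().
 */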
int arch_prepare_hugepage(struct page *page)
{
        unsigned long addr = page_to_phys(page);
        pte_t pte;
        pte_t *ptep;
        int i;

        if (MACHINE_HAS_HPAGE)
                return 0;

        ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
        if (!ptep)
                return -ENOMEM;

        pte_val(pte) = addr;
        for (i = 0; i < PTRS_PER_PTE; i++) {
                set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
                pte_val(pte) += PAGE_SIZE;
        }
        page[1].index = (unsigned long) ptep;
        return 0;
}
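
/*
 * Undo arch_prepare_hugepage(): invalidate and free the software page
 * table, if one had been allocated.
 */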
void arch_release_hugepage(struct page *page)
{
        pte_t *ptep;

        if (MACHINE_HAS_HPAGE)
                return;

        ptep = (pte_t *) page[1].index;
        if (!ptep)
                return;
        clear_table((unsigned long *) ptep, _PAGE_INVALID,
                    PTRS_PER_PTE * sizeof(pte_t));
        page_table_free(&init_mm, (unsigned long *) ptep);
        page[1].index = 0;
}
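
/*
 * Huge pages are mapped at the segment (pmd) level, so allocating a
 * "huge pte" means walking down to, and if necessary allocating, the
 * pmd and returning it as a pte pointer.
 */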
pte_t *huge_pte_alloc(struct mm_struct *mm,
                      unsigned long addr, unsigned long sz)
{
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp = NULL;

        pgdp = pgd_offset(mm, addr);
        pudp = pud_alloc(mm, pgdp, addr);
        if (pudp)
                pmdp = pmd_alloc(mm, pudp, addr);
        return (pte_t *) pmdp;
}
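
/* Look up the pmd for a hugetlb mapping without allocating anything. */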
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp = NULL;

        pgdp = pgd_offset(mm, addr);
        if (pgd_present(*pgdp)) {
                pudp = pud_offset(pgdp, addr);
                if (pud_present(*pudp))
                        pmdp = pmd_offset(pudp, addr);
        }
        return (pte_t *) pmdp;
}
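
/* pmd sharing for hugetlb mappings is not implemented on s390. */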
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
        return 0;
}
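
/*
 * Always fails with -EINVAL so that the generic code keeps walking the
 * page tables; huge mappings are found via follow_huge_pmd() instead.
 */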
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write)
{
        return ERR_PTR(-EINVAL);
}
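
/*
 * A pmd counts as huge only if its large-page bit is set, which can
 * only happen on machines with MACHINE_HAS_HPAGE; a software-emulated
 * huge page looks like a normal segment entry here.
 */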
int pmd_huge(pmd_t pmd)
{
        if (!MACHINE_HAS_HPAGE)
                return 0;

        return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
}
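
/* s390 has no huge pages at the pud level. */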
int pud_huge(pud_t pud)
{
        return 0;
}
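
/*
 * Return the struct page for the (small) page at the given address
 * within a hardware huge pmd; with the software emulation there is no
 * large segment entry to follow.
 */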
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmdp, int write)
{
        struct page *page;

        if (!MACHINE_HAS_HPAGE)
                return NULL;

        page = pmd_page(*pmdp);
        if (page)
                page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
        return page;
}