pgtable.h

#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
/*
 * This file contains the functions and defines necessary to modify and use
 * the ppc64 hashed page table.
 */
#include <asm/book3s/64/hash.h>
#include <asm/barrier.h>

/*
 * The second half of the kernel virtual space is used for IO mappings,
 * it's itself carved into the PIO region (ISA and PHB IO space) and
 * the ioremap space
 *
 *  ISA_IO_BASE = KERN_IO_START, 64K reserved area
 *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
 * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
 */
#define KERN_IO_START	(KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
#define FULL_IO_SIZE	0x80000000ul
#define ISA_IO_BASE	(KERN_IO_START)
#define ISA_IO_END	(KERN_IO_START + 0x10000ul)
#define PHB_IO_BASE	(ISA_IO_END)
#define PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE	(PHB_IO_END)
#define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE)
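/*
 * Illustrative note (not part of the original header): with the
 * definitions above, the IO half of the kernel virtual space breaks
 * down as
 *
 *	ISA IO space:	KERN_IO_START .. KERN_IO_START + 64K
 *	PHB IO space:	KERN_IO_START + 64K .. KERN_IO_START + 2G
 *	ioremap space:	KERN_IO_START + 2G .. KERN_VIRT_START + KERN_VIRT_SIZE
 *
 * i.e. the first 2G (FULL_IO_SIZE) is reserved for PIO and the ioremap
 * region takes whatever remains of the second half of KERN_VIRT_SIZE.
 */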
#define vmemmap			((struct page *)VMEMMAP_BASE)

/* Advertise special mapping type for AGP */
#define HAVE_PAGE_AGP

/* Advertise support for _PAGE_SPECIAL */
#define __HAVE_ARCH_PTE_SPECIAL

#ifndef __ASSEMBLY__

/*
 * This is the default implementation of various PTE accessors, it's
 * used in all cases except Book3S with 64K pages where we have a
 * concept of sub-pages
 */
#ifndef __real_pte

#ifdef CONFIG_STRICT_MM_TYPECHECKS
#define __real_pte(e,p)		((real_pte_t){(e)})
#define __rpte_to_pte(r)	((r).pte)
#else
#define __real_pte(e,p)		(e)
#define __rpte_to_pte(r)	(__pte(r))
#endif
#define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> _PAGE_F_GIX_SHIFT)

#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)	\
	do {								\
		index = 0;						\
		shift = mmu_psize_defs[psize].shift;			\

#define pte_iterate_hashed_end() } while(0)

/*
 * We expect this to be called only for user addresses or kernel virtual
 * addresses other than the linear mapping.
 */
#define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K

#endif /* __real_pte */
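/*
 * Illustrative sketch (not part of the original header): callers in the
 * hash MMU code pair the two iteration macros above roughly like
 *
 *	pte_iterate_hashed_subpages(rpte, psize, va, index, shift) {
 *		hidx = __rpte_to_hidx(rpte, index);
 *		... flush or update the HPTE for this sub-page ...
 *	} pte_iterate_hashed_end();
 *
 * With this default (no sub-page) implementation the body runs exactly
 * once, with index == 0 and shift taken from mmu_psize_defs[psize].
 */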
static inline void pmd_set(pmd_t *pmdp, unsigned long val)
{
	*pmdp = __pmd(val);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(!pmd_none(pmd))

static inline void pud_set(pud_t *pudp, unsigned long val)
{
	*pudp = __pud(val);
}

static inline void pud_clear(pud_t *pudp)
{
	*pudp = __pud(0);
}

#define pud_none(pud)		(!pud_val(pud))
#define pud_present(pud)	(pud_val(pud) != 0)

extern struct page *pud_page(pud_t pud);
extern struct page *pmd_page(pmd_t pmd);

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

#define pud_write(pud)		pte_write(pud_pte(pud))
#define pgd_write(pgd)		pte_write(pgd_pte(pgd))

static inline void pgd_set(pgd_t *pgdp, unsigned long val)
{
	*pgdp = __pgd(val);
}

/*
 * Find an entry in a page-table-directory.  We combine the address region
 * (the high order N bits) and the pgd portion of the address.
 */
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

#define pmd_offset(pudp,addr) \
	(((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr))

#define pte_offset_kernel(dir,addr) \
	(((pte_t *) pmd_page_vaddr(*(dir))) + pte_index(addr))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while(0)

/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
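/*
 * Illustrative sketch (not part of the original header): the offset
 * macros above compose into the usual software page-table walk.  The
 * pud_offset() step comes from related headers, not this file, and all
 * locking and presence checks are omitted here.
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * Each step is only valid if the previous level is present
 * (pgd_present()/pud_present()/pmd_present()).
 */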
#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* Encode and de-code a swap entry */
#define MAX_SWAPFILES_CHECK() do { \
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
	/* \
	 * Don't have overlapping bits with _PAGE_HPTEFLAGS. \
	 * We filter HPTEFLAGS on set_pte. \
	 */ \
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_SOFT_DIRTY); \
	} while (0)
/*
 * On pte we don't need to handle RADIX_TREE_EXCEPTIONAL_SHIFT.
 */
#define SWP_TYPE_BITS 5
#define __swp_type(x)		(((x).val >> _PAGE_BIT_SWAP_TYPE) \
				& ((1UL << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)		((x).val >> PTE_RPN_SHIFT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
					((type) << _PAGE_BIT_SWAP_TYPE) \
					| ((offset) << PTE_RPN_SHIFT) })
/*
 * swp_entry_t must be independent of pte bits. We build a swp_entry_t from
 * the swap type and offset we get from swap and convert that to a pte to
 * find a matching pte in the linux page table.
 * Clear bits not found in swap entries here.
 */
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val((pte)) & ~_PAGE_PTE })
#define __swp_entry_to_pte(x)	__pte((x).val | _PAGE_PTE)
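/*
 * Illustrative sketch (not part of the original header): round-tripping a
 * swap entry through a PTE with the macros above.
 *
 *	swp_entry_t entry = __swp_entry(type, offset);
 *	pte_t swp_pte = __swp_entry_to_pte(entry);	sets _PAGE_PTE
 *
 *	entry  = __pte_to_swp_entry(swp_pte);		clears _PAGE_PTE
 *	type   = __swp_type(entry);			SWP_TYPE_BITS wide field
 *	offset = __swp_offset(entry);			stored in the RPN field
 *
 * The type field starts at _PAGE_BIT_SWAP_TYPE and must not overlap
 * _PAGE_HPTEFLAGS, which MAX_SWAPFILES_CHECK() above enforces at build
 * time.
 */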
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY	(1UL << (SWP_TYPE_BITS + _PAGE_BIT_SWAP_TYPE))
#else
#define _PAGE_SWP_SOFT_DIRTY	0UL
#endif /* CONFIG_MEM_SOFT_DIRTY */

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
}

static inline bool pte_swp_soft_dirty(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_SWP_SOFT_DIRTY);
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_SOFT_DIRTY);
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);
struct page *realmode_pfn_to_page(unsigned long pfn);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);
extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
				 pmd_t *pmd);
extern int has_transparent_hugepage(void);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pte_t *pmdp_ptep(pmd_t *pmd)
{
	return (pte_t *)pmd;
}

#define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#define pmd_soft_dirty(pmd)		pte_soft_dirty(pmd_pte(pmd))
#define pmd_mksoft_dirty(pmd)		pte_pmd(pte_mksoft_dirty(pmd_pte(pmd)))
#define pmd_clear_soft_dirty(pmd)	pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd)))
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#ifdef CONFIG_NUMA_BALANCING
static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_THP_HUGE));
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
extern pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp);

extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);
#define pmdp_collapse_flush pmdp_collapse_flush

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_INVALIDATE
extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp);

#define pmd_move_must_withdraw pmd_move_must_withdraw
struct spinlock;
static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
					 struct spinlock *old_pmd_ptl)
{
	/*
	 * Archs like ppc64 use pgtable to store per pmd
	 * specific information. So when we switch the pmd,
	 * we should also withdraw and deposit the pgtable
	 */
	return true;
}
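/*
 * Illustrative sketch (not part of the original header): the generic THP
 * mremap path consults pmd_move_must_withdraw() roughly like
 *
 *	if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
 *		pgtable_t pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
 *		pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
 *	}
 *
 * so the page table deposited under the old pmd (which carries the
 * per-pmd information mentioned above) always moves together with the
 * pmd entry itself.
 */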
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */