pgtable-64.h

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_64_H
#define _ASM_PGTABLE_64_H

#include <linux/compiler.h>
#include <linux/linkage.h>

#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#define __ARCH_USE_5LEVEL_HACK
#if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48)
#include <asm-generic/pgtable-nopmd.h>
#elif !(defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_MIPS_VA_BITS_48))
#include <asm-generic/pgtable-nopud.h>
#endif
/*
 * Each address space has 2 4K pages as its page directory, giving 1024
 * (== PTRS_PER_PGD) 8 byte pointers to pmd tables.  Each pmd table is a
 * single 4K page, giving 512 (== PTRS_PER_PMD) 8 byte pointers to page
 * tables.  Each page table is also a single 4K page, giving 512 (==
 * PTRS_PER_PTE) 8 byte ptes.  Each pud entry is initialized to point to
 * invalid_pmd_table, each pmd entry is initialized to point to
 * invalid_pte_table, each pte is initialized to 0.
 *
 * Kernel mappings: kernel mappings are held in swapper_pg_dir.
 * The layout is identical to userspace except it's indexed with the
 * fault address - VMALLOC_START.
 */

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#ifdef __PAGETABLE_PMD_FOLDED
#define PGDIR_SHIFT	(PAGE_SHIFT + PAGE_SHIFT + PTE_ORDER - 3)
#else

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

# ifdef __PAGETABLE_PUD_FOLDED
# define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
# endif
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define PUD_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SHIFT	(PUD_SHIFT + (PAGE_SHIFT + PUD_ORDER - 3))
#endif

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
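
/*
 * Illustrative worked example (sketch, not from the original header):
 * with the classic 4kB-page, 3-level configuration defined below
 * (PTE_ORDER = 0, PMD_ORDER = 0, PUD level folded, PGD_ORDER = 1),
 * the definitions above expand to
 *
 *	PMD_SHIFT   = 12 + (12 + 0 - 3) = 21   ->  PMD_SIZE   = 2 MB
 *	PGDIR_SHIFT = 21 + (12 + 0 - 3) = 30   ->  PGDIR_SIZE = 1 GB
 *
 * and the order-1 (8kB) pgd holds 1024 entries, so the tree spans
 * 1024 * 1 GB = 2^40 bytes -- the 40 bits of virtual address space
 * mentioned in the comment that follows.
 */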
/*
 * For 4kB page size we use a 3 level page tree and an 8kB pgd, which
 * permits us mapping 40 bits of virtual address space.
 *
 * We used to implement 41 bits by having an order 1 pmd level but that seemed
 * rather pointless.
 *
 * For 8kB page size we use a 3 level page tree which permits a total of
 * 8TB of address space.  Alternatively a 33-bit / 8GB organization using
 * two levels would be easy to implement.
 *
 * For 16kB page size we use a 2 level page tree which permits a total of
 * 36 bits of virtual address space.  We could add a third level but it seems
 * like at the moment there's no need for this.
 *
 * For 64kB page size we use a 2 level page table tree for a total of 42 bits
 * of virtual address space.
 */
#ifdef CONFIG_PAGE_SIZE_4KB
# ifdef CONFIG_MIPS_VA_BITS_48
#  define PGD_ORDER	0
#  define PUD_ORDER	0
# else
#  define PGD_ORDER	1
#  define PUD_ORDER	aieeee_attempt_to_allocate_pud
# endif
#define PMD_ORDER	0
#define PTE_ORDER	0
#endif
#ifdef CONFIG_PAGE_SIZE_8KB
#define PGD_ORDER	0
#define PUD_ORDER	aieeee_attempt_to_allocate_pud
#define PMD_ORDER	0
#define PTE_ORDER	0
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#ifdef CONFIG_MIPS_VA_BITS_48
#define PGD_ORDER	1
#else
#define PGD_ORDER	0
#endif
#define PUD_ORDER	aieeee_attempt_to_allocate_pud
#define PMD_ORDER	0
#define PTE_ORDER	0
#endif
#ifdef CONFIG_PAGE_SIZE_32KB
#define PGD_ORDER	0
#define PUD_ORDER	aieeee_attempt_to_allocate_pud
#define PMD_ORDER	0
#define PTE_ORDER	0
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PGD_ORDER	0
#define PUD_ORDER	aieeee_attempt_to_allocate_pud
#ifdef CONFIG_MIPS_VA_BITS_48
#define PMD_ORDER	0
#else
#define PMD_ORDER	aieeee_attempt_to_allocate_pmd
#endif
#define PTE_ORDER	0
#endif

#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#ifndef __PAGETABLE_PUD_FOLDED
#define PTRS_PER_PUD	((PAGE_SIZE << PUD_ORDER) / sizeof(pud_t))
#endif
#ifndef __PAGETABLE_PMD_FOLDED
#define PTRS_PER_PMD	((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t))
#endif
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

#define USER_PTRS_PER_PGD	((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
#define FIRST_USER_ADDRESS	0UL
/*
 * TLB refill handlers also map the vmalloc area into xuseg.  Avoid
 * the first couple of pages so NULL pointer dereferences will still
 * reliably trap.
 */
#define VMALLOC_START		(MAP_BASE + (2 * PAGE_SIZE))
#define VMALLOC_END	\
	(MAP_BASE + \
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
	     (1UL << cpu_vmbits)) - (1UL << 32))

#if defined(CONFIG_MODULES) && defined(KBUILD_64BIT_SYM32) && \
	VMALLOC_START != CKSSEG
/* Load modules into 32bit-compatible segment. */
#define MODULE_START	CKSSEG
#define MODULE_END	(FIXADDR_START-2*PAGE_SIZE)
#endif
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];
#ifndef __PAGETABLE_PUD_FOLDED
/*
 * For 4-level pagetables we define these ourselves, for 3-level the
 * definitions are below, for 2-level the
 * definitions are supplied by <asm-generic/pgtable-nopmd.h>.
 */
typedef struct { unsigned long pud; } pud_t;
#define pud_val(x)	((x).pud)
#define __pud(x)	((pud_t) { (x) })

extern pud_t invalid_pud_table[PTRS_PER_PUD];

/*
 * Empty pgd entries point to the invalid_pud_table.
 */
static inline int pgd_none(pgd_t pgd)
{
	return pgd_val(pgd) == (unsigned long)invalid_pud_table;
}

static inline int pgd_bad(pgd_t pgd)
{
	if (unlikely(pgd_val(pgd) & ~PAGE_MASK))
		return 1;

	return 0;
}

static inline int pgd_present(pgd_t pgd)
{
	return pgd_val(pgd) != (unsigned long)invalid_pud_table;
}

static inline void pgd_clear(pgd_t *pgdp)
{
	pgd_val(*pgdp) = (unsigned long)invalid_pud_table;
}

#define pud_index(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return pgd_val(pgd);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline void set_pgd(pgd_t *pgd, pgd_t pgdval)
{
	*pgd = pgdval;
}
#endif
#ifndef __PAGETABLE_PMD_FOLDED
/*
 * For 3-level pagetables we define these ourselves, for 2-level the
 * definitions are supplied by <asm-generic/pgtable-nopmd.h>.
 */
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) } )

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
#endif
/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* pmd_huge(pmd) but inline */
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return 0;
#endif

	if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
		return 1;

	return 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long) invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long) invalid_pmd_table;
}

static inline void pud_clear(pud_t *pudp)
{
	pud_val(*pudp) = ((unsigned long) invalid_pmd_table);
}
#endif
#define pte_page(x)		pfn_to_page(pte_pfn(x))

#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x)		((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot)	__pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
#else
#define pte_pfn(x)		((unsigned long)((x).pte >> _PFN_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif
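
#if 0	/* Illustrative sketch only, not part of the header proper. */
/*
 * The conversions above are inverses of one another: a pte built with
 * pfn_pte() hands back the same pfn through pte_pfn(), because the pfn
 * is stored above _PFN_SHIFT (or PAGE_SHIFT + 2 on VR41xx) with the
 * protection bits below it.  The example_* helper name is hypothetical.
 */
static inline int example_pfn_roundtrip(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	return pte_pfn(pte) == pfn;	/* true whenever the pfn fits the field */
}
#endif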
#define __pgd_offset(address)	pgd_index(address)
#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)	pmd_index(address)

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

#ifndef __PAGETABLE_PMD_FOLDED
static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return pud_val(pud);
}
#define pud_phys(pud)		virt_to_phys((void *)pud_val(pud))
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address)
{
	return (pmd_t *) pud_page_vaddr(*pud) + pmd_index(address);
}
#endif

/* Find an entry in the third-level page table.. */
#define __pte_offset(address)						\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_map(dir, address)					\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
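
#if 0	/* Illustrative sketch only, not part of the header proper. */
/*
 * How the lookup helpers above compose: resolve the pte mapping a kernel
 * virtual address.  Written against the non-folded layout; with folded
 * levels the pud/pmd steps become no-ops supplied by the asm-generic
 * nopud/nopmd headers included at the top of this file.  The example_*
 * helper name is hypothetical.
 */
static inline pte_t *example_lookup_kernel_pte(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, address);
}
#endif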
/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
extern void pud_init(unsigned long page, unsigned long pagetable);
extern void pmd_init(unsigned long page, unsigned long pagetable);
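
#if 0	/* Conceptual sketch only; the real implementations live in arch/mips/mm/. */
/*
 * pmd_init(page, pagetable) fills a freshly allocated pmd page so that
 * every slot points at 'pagetable' (normally invalid_pte_table); a new
 * table therefore starts with every entry "empty" in the sense tested by
 * pmd_none() above.  pgd_init()/pud_init() follow the same pattern one
 * level up.  The example_* helper name is hypothetical.
 */
static inline void example_pmd_init(unsigned long page, unsigned long pagetable)
{
	unsigned long *entry = (unsigned long *)page;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++)
		entry[i] = pagetable;
}
#endif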
/*
 * Non-present pages: high 40 bits are offset, next 8 bits type,
 * low 16 bits zero.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 16) | (offset << 24); return pte; }

#define __swp_type(x)		(((x).val >> 16) & 0xff)
#define __swp_offset(x)		((x).val >> 24)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
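
#if 0	/* Illustrative sketch only, not part of the header proper. */
/*
 * Round-tripping the swap encoding above: __swp_entry() packs the type
 * into bits 16..23 and the offset into bits 24 and up, so __swp_type()
 * and __swp_offset() recover exactly what went in.  swp_entry_t comes
 * from <linux/mm_types.h>; the example_* helper name is hypothetical.
 */
static inline int example_swp_roundtrip(void)
{
	swp_entry_t entry = __swp_entry(3, 0x12345);

	return __swp_type(entry) == 3 && __swp_offset(entry) == 0x12345;
}
#endif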

#endif /* _ASM_PGTABLE_64_H */