/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * OpenRISC implementation:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 * et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

/* or32 pgtable.h - macros and functions to manipulate page tables
 *
 * Based on:
 * include/asm-cris/pgtable.h
 */
#ifndef __ASM_OPENRISC_PGTABLE_H
#define __ASM_OPENRISC_PGTABLE_H

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <asm/mmu.h>
#include <asm/fixmap.h>
  30. /*
  31. * The Linux memory management assumes a three-level page table setup. On
  32. * or32, we use that, but "fold" the mid level into the top-level page
  33. * table. Since the MMU TLB is software loaded through an interrupt, it
  34. * supports any page table structure, so we could have used a three-level
  35. * setup, but for the amounts of memory we normally use, a two-level is
  36. * probably more efficient.
  37. *
  38. * This file contains the functions and defines necessary to modify and use
  39. * the or32 page table tree.
  40. */
  41. extern void paging_init(void);
  42. /* Certain architectures need to do special things when pte's
  43. * within a page table are directly modified. Thus, the following
  44. * hook is made available.
  45. */
  46. #define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
  47. #define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
  48. /*
  49. * (pmds are folded into pgds so this doesn't get actually called,
  50. * but the define is needed for a generic inline function.)
  51. */
  52. #define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
  53. #define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-2))
  54. #define PGDIR_SIZE (1UL << PGDIR_SHIFT)
  55. #define PGDIR_MASK (~(PGDIR_SIZE-1))
  56. /*
  57. * entries per page directory level: we use a two-level, so
  58. * we don't really have any PMD directory physically.
  59. * pointers are 4 bytes so we can use the page size and
  60. * divide it by 4 (shift by 2).
  61. */
  62. #define PTRS_PER_PTE (1UL << (PAGE_SHIFT-2))
  63. #define PTRS_PER_PGD (1UL << (32-PGDIR_SHIFT))
  64. /* calculate how many PGD entries a user-level program can use
  65. * the first mappable virtual address is 0
  66. * (TASK_SIZE is the maximum virtual address space)
  67. */
  68. #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
  69. #define FIRST_USER_ADDRESS 0UL
  70. /*
  71. * Kernels own virtual memory area.
  72. */
  73. /*
  74. * The size and location of the vmalloc area are chosen so that modules
  75. * placed in this area aren't more than a 28-bit signed offset from any
  76. * kernel functions that they may need. This greatly simplifies handling
  77. * of the relocations for l.j and l.jal instructions as we don't need to
  78. * introduce any trampolines for reaching "distant" code.
  79. *
  80. * 64 MB of vmalloc area is comparable to what's available on other arches.
  81. */
  82. #define VMALLOC_START (PAGE_OFFSET-0x04000000UL)
  83. #define VMALLOC_END (PAGE_OFFSET)
  84. #define VMALLOC_VMADDR(x) ((unsigned long)(x))
  85. /* Define some higher level generic page attributes.
  86. *
  87. * If you change _PAGE_CI definition be sure to change it in
  88. * io.h for ioremap_nocache() too.
  89. */
  90. /*
  91. * An OR32 PTE looks like this:
  92. *
  93. * | 31 ... 10 | 9 | 8 ... 6 | 5 | 4 | 3 | 2 | 1 | 0 |
  94. * Phys pg.num L PP Index D A WOM WBC CI CC
  95. *
  96. * L : link
  97. * PPI: Page protection index
  98. * D : Dirty
  99. * A : Accessed
  100. * WOM: Weakly ordered memory
  101. * WBC: Write-back cache
  102. * CI : Cache inhibit
  103. * CC : Cache coherent
  104. *
  105. * The protection bits below should correspond to the layout of the actual
  106. * PTE as per above
  107. */
  108. #define _PAGE_CC 0x001 /* software: pte contains a translation */
  109. #define _PAGE_CI 0x002 /* cache inhibit */
  110. #define _PAGE_WBC 0x004 /* write back cache */
  111. #define _PAGE_WOM 0x008 /* weakly ordered memory */
  112. #define _PAGE_A 0x010 /* accessed */
  113. #define _PAGE_D 0x020 /* dirty */
  114. #define _PAGE_URE 0x040 /* user read enable */
  115. #define _PAGE_UWE 0x080 /* user write enable */
  116. #define _PAGE_SRE 0x100 /* superuser read enable */
  117. #define _PAGE_SWE 0x200 /* superuser write enable */
  118. #define _PAGE_EXEC 0x400 /* software: page is executable */
  119. #define _PAGE_U_SHARED 0x800 /* software: page is shared in user space */
  120. /* 0x001 is cache coherency bit, which should always be set to
  121. * 1 - for SMP (when we support it)
  122. * 0 - otherwise
  123. *
  124. * we just reuse this bit in software for _PAGE_PRESENT and
  125. * force it to 0 when loading it into TLB.
  126. */
  127. #define _PAGE_PRESENT _PAGE_CC
  128. #define _PAGE_USER _PAGE_URE
  129. #define _PAGE_WRITE (_PAGE_UWE | _PAGE_SWE)
  130. #define _PAGE_DIRTY _PAGE_D
  131. #define _PAGE_ACCESSED _PAGE_A
  132. #define _PAGE_NO_CACHE _PAGE_CI
  133. #define _PAGE_SHARED _PAGE_U_SHARED
  134. #define _PAGE_READ (_PAGE_URE | _PAGE_SRE)
  135. #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
  136. #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED)
  137. #define _PAGE_ALL (_PAGE_PRESENT | _PAGE_ACCESSED)
  138. #define _KERNPG_TABLE \
  139. (_PAGE_BASE | _PAGE_SRE | _PAGE_SWE | _PAGE_ACCESSED | _PAGE_DIRTY)
  140. #define PAGE_NONE __pgprot(_PAGE_ALL)
  141. #define PAGE_READONLY __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE)
  142. #define PAGE_READONLY_X __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_EXEC)
  143. #define PAGE_SHARED \
  144. __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_UWE | _PAGE_SWE \
  145. | _PAGE_SHARED)
  146. #define PAGE_SHARED_X \
  147. __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_UWE | _PAGE_SWE \
  148. | _PAGE_SHARED | _PAGE_EXEC)
  149. #define PAGE_COPY __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE)
  150. #define PAGE_COPY_X __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_EXEC)
  151. #define PAGE_KERNEL \
  152. __pgprot(_PAGE_ALL | _PAGE_SRE | _PAGE_SWE \
  153. | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC)
  154. #define PAGE_KERNEL_RO \
  155. __pgprot(_PAGE_ALL | _PAGE_SRE \
  156. | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC)
  157. #define PAGE_KERNEL_NOCACHE \
  158. __pgprot(_PAGE_ALL | _PAGE_SRE | _PAGE_SWE \
  159. | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC | _PAGE_CI)
  160. #define __P000 PAGE_NONE
  161. #define __P001 PAGE_READONLY_X
  162. #define __P010 PAGE_COPY
  163. #define __P011 PAGE_COPY_X
  164. #define __P100 PAGE_READONLY
  165. #define __P101 PAGE_READONLY_X
  166. #define __P110 PAGE_COPY
  167. #define __P111 PAGE_COPY_X
  168. #define __S000 PAGE_NONE
  169. #define __S001 PAGE_READONLY_X
  170. #define __S010 PAGE_SHARED
  171. #define __S011 PAGE_SHARED_X
  172. #define __S100 PAGE_READONLY
  173. #define __S101 PAGE_READONLY_X
  174. #define __S110 PAGE_SHARED
  175. #define __S111 PAGE_SHARED_X
  176. /* zero page used for uninitialized stuff */
  177. extern unsigned long empty_zero_page[2048];
  178. #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
  179. /* number of bits that fit into a memory pointer */
  180. #define BITS_PER_PTR (8*sizeof(unsigned long))
  181. /* to align the pointer to a pointer address */
  182. #define PTR_MASK (~(sizeof(void *)-1))
  183. /* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
  184. /* 64-bit machines, beware! SRB. */
  185. #define SIZEOF_PTR_LOG2 2
  186. /* to find an entry in a page-table */
  187. #define PAGE_PTR(address) \
  188. ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
  189. /* to set the page-dir */
  190. #define SET_PAGE_DIR(tsk, pgdir)
  191. #define pte_none(x) (!pte_val(x))
  192. #define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
  193. #define pte_clear(mm, addr, xp) do { pte_val(*(xp)) = 0; } while (0)
  194. #define pmd_none(x) (!pmd_val(x))
  195. #define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK)) != _KERNPG_TABLE)
  196. #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
  197. #define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)
  198. /*
  199. * The following only work if pte_present() is true.
  200. * Undefined behaviour if not..
  201. */
  202. static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_READ; }
  203. static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
  204. static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
  205. static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
  206. static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
  207. static inline int pte_special(pte_t pte) { return 0; }
  208. static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
  209. static inline pte_t pte_wrprotect(pte_t pte)
  210. {
  211. pte_val(pte) &= ~(_PAGE_WRITE);
  212. return pte;
  213. }
  214. static inline pte_t pte_rdprotect(pte_t pte)
  215. {
  216. pte_val(pte) &= ~(_PAGE_READ);
  217. return pte;
  218. }
  219. static inline pte_t pte_exprotect(pte_t pte)
  220. {
  221. pte_val(pte) &= ~(_PAGE_EXEC);
  222. return pte;
  223. }
  224. static inline pte_t pte_mkclean(pte_t pte)
  225. {
  226. pte_val(pte) &= ~(_PAGE_DIRTY);
  227. return pte;
  228. }
  229. static inline pte_t pte_mkold(pte_t pte)
  230. {
  231. pte_val(pte) &= ~(_PAGE_ACCESSED);
  232. return pte;
  233. }
  234. static inline pte_t pte_mkwrite(pte_t pte)
  235. {
  236. pte_val(pte) |= _PAGE_WRITE;
  237. return pte;
  238. }
  239. static inline pte_t pte_mkread(pte_t pte)
  240. {
  241. pte_val(pte) |= _PAGE_READ;
  242. return pte;
  243. }
  244. static inline pte_t pte_mkexec(pte_t pte)
  245. {
  246. pte_val(pte) |= _PAGE_EXEC;
  247. return pte;
  248. }
  249. static inline pte_t pte_mkdirty(pte_t pte)
  250. {
  251. pte_val(pte) |= _PAGE_DIRTY;
  252. return pte;
  253. }
  254. static inline pte_t pte_mkyoung(pte_t pte)
  255. {
  256. pte_val(pte) |= _PAGE_ACCESSED;
  257. return pte;
  258. }
  259. /*
  260. * Conversion functions: convert a page and protection to a page entry,
  261. * and a page entry and page directory to the page they refer to.
  262. */
  263. /* What actually goes as arguments to the various functions is less than
  264. * obvious, but a rule of thumb is that struct page's goes as struct page *,
  265. * really physical DRAM addresses are unsigned long's, and DRAM "virtual"
  266. * addresses (the 0xc0xxxxxx's) goes as void *'s.
  267. */
  268. static inline pte_t __mk_pte(void *page, pgprot_t pgprot)
  269. {
  270. pte_t pte;
  271. /* the PTE needs a physical address */
  272. pte_val(pte) = __pa(page) | pgprot_val(pgprot);
  273. return pte;
  274. }
  275. #define mk_pte(page, pgprot) __mk_pte(page_address(page), (pgprot))
  276. #define mk_pte_phys(physpage, pgprot) \
  277. ({ \
  278. pte_t __pte; \
  279. \
  280. pte_val(__pte) = (physpage) + pgprot_val(pgprot); \
  281. __pte; \
  282. })
  283. static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
  284. {
  285. pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
  286. return pte;
  287. }
  288. /*
  289. * pte_val refers to a page in the 0x0xxxxxxx physical DRAM interval
  290. * __pte_page(pte_val) refers to the "virtual" DRAM interval
  291. * pte_pagenr refers to the page-number counted starting from the virtual
  292. * DRAM start
  293. */
  294. static inline unsigned long __pte_page(pte_t pte)
  295. {
  296. /* the PTE contains a physical address */
  297. return (unsigned long)__va(pte_val(pte) & PAGE_MASK);
  298. }
  299. #define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
  300. /* permanent address of a page */
  301. #define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
  302. #define pte_page(pte) (mem_map+pte_pagenr(pte))
  303. /*
  304. * only the pte's themselves need to point to physical DRAM (see above)
  305. * the pagetable links are purely handled within the kernel SW and thus
  306. * don't need the __pa and __va transformations.
  307. */
  308. static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
  309. {
  310. pmd_val(*pmdp) = _KERNPG_TABLE | (unsigned long) ptep;
  311. }
  312. #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
  313. #define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
  314. /* to find an entry in a page-table-directory. */
  315. #define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
  316. #define __pgd_offset(address) pgd_index(address)
  317. #define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
  318. /* to find an entry in a kernel page-table-directory */
  319. #define pgd_offset_k(address) pgd_offset(&init_mm, address)
  320. #define __pmd_offset(address) \
  321. (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
  322. /*
  323. * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
  324. *
  325. * this macro returns the index of the entry in the pte page which would
  326. * control the given virtual address
  327. */
  328. #define __pte_offset(address) \
  329. (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
  330. #define pte_offset_kernel(dir, address) \
  331. ((pte_t *) pmd_page_kernel(*(dir)) + __pte_offset(address))
  332. #define pte_offset_map(dir, address) \
  333. ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
  334. #define pte_offset_map_nested(dir, address) \
  335. pte_offset_map(dir, address)
  336. #define pte_unmap(pte) do { } while (0)
  337. #define pte_unmap_nested(pte) do { } while (0)
  338. #define pte_pfn(x) ((unsigned long)(((x).pte)) >> PAGE_SHIFT)
  339. #define pfn_pte(pfn, prot) __pte((((pfn) << PAGE_SHIFT)) | pgprot_val(prot))
  340. #define pte_ERROR(e) \
  341. printk(KERN_ERR "%s:%d: bad pte %p(%08lx).\n", \
  342. __FILE__, __LINE__, &(e), pte_val(e))
  343. #define pgd_ERROR(e) \
  344. printk(KERN_ERR "%s:%d: bad pgd %p(%08lx).\n", \
  345. __FILE__, __LINE__, &(e), pgd_val(e))
  346. extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* defined in head.S */
  347. struct vm_area_struct;
  348. static inline void update_tlb(struct vm_area_struct *vma,
  349. unsigned long address, pte_t *pte)
  350. {
  351. }
  352. extern void update_cache(struct vm_area_struct *vma,
  353. unsigned long address, pte_t *pte);
  354. static inline void update_mmu_cache(struct vm_area_struct *vma,
  355. unsigned long address, pte_t *pte)
  356. {
  357. update_tlb(vma, address, pte);
  358. update_cache(vma, address, pte);
  359. }
  360. /* __PHX__ FIXME, SWAP, this probably doesn't work */
  361. /* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */
  362. /* Since the PAGE_PRESENT bit is bit 4, we can use the bits above */
  363. #define __swp_type(x) (((x).val >> 5) & 0x7f)
  364. #define __swp_offset(x) ((x).val >> 12)
  365. #define __swp_entry(type, offset) \
  366. ((swp_entry_t) { ((type) << 5) | ((offset) << 12) })
  367. #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
  368. #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
  369. #define kern_addr_valid(addr) (1)
  370. #include <asm-generic/pgtable.h>
  371. /*
  372. * No page table caches to initialise
  373. */
  374. #define pgtable_cache_init() do { } while (0)
  375. typedef pte_t *pte_addr_t;
#endif /* __ASSEMBLY__ */
#endif /* __ASM_OPENRISC_PGTABLE_H */