/* pgtable.h */
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_PGTABLE_H
#define __ASM_CSKY_PGTABLE_H

#include <asm/fixmap.h>
#include <asm/addrspace.h>
#include <abi/pgtable-bits.h>
#include <asm-generic/pgtable-nopmd.h>

/* Two-level paging: each pgd entry maps 4 MiB (1 << 22). */
#define PGDIR_SHIFT	22
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/* User space is the low 2 GiB of the virtual address space. */
#define USER_PTRS_PER_PGD	(0x80000000UL/PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL

/*
 * Fixed kernel virtual layout: the pkmap area sits above the vmalloc
 * area, with a two-page guard gap between them.
 */
#define PKMAP_BASE	(0xff800000)

#define VMALLOC_START	(0xc0008000)
#define VMALLOC_END	(PKMAP_BASE - 2*PAGE_SIZE)
/*
 * C-SKY is two-level paging structure:
 */
#define PGD_ORDER	0
#define PTE_ORDER	0

/* Order-0 (single page) tables at both levels. */
#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#define PTRS_PER_PMD	1	/* pmd level is folded (pgtable-nopmd.h) */
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

/* Diagnostics for corrupted table entries. */
#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/* Find an entry in the third-level page table.. */
#define __pte_offset_t(address) \
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	(pmd_page_vaddr(*(dir)) + __pte_offset_t(address))
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset_t(address))
#define pmd_page(pmd)	(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))

/*
 * Clearing a pte: kernel addresses (bit 31 set) clear to the value 1,
 * user addresses clear to 0.
 * NOTE(review): presumably bit 0 marks the cleared kernel entry as
 * global for the TLB — confirm against abi/pgtable-bits.h.
 */
#define pte_clear(mm, addr, ptep)	set_pte((ptep), \
	(((unsigned int)addr&0x80000000)?__pte(1):__pte(0)))
/* "none" ignores bit 0, matching the pte_clear() encoding above. */
#define pte_none(pte)	(!(pte_val(pte)&0xfffffffe))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_pfn(x)	((unsigned long)((x).pte_low >> PAGE_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((unsigned long long)(pfn) << PAGE_SHIFT) \
				| pgprot_val(prot))

/* Bit groups toggled together by the access/dirty helpers below. */
#define __READABLE	(_PAGE_READ | _PAGE_VALID | _PAGE_ACCESSED)
#define __WRITEABLE	(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED)

/* Bits preserved across pte_modify(). */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | \
			 _CACHE_MASK)

#define pte_unmap(pte)	((void)(pte))

/* Swap-entry encoding: type in bits 4..11, offset from bit 12 up. */
#define __swp_type(x)			(((x).val >> 4) & 0xff)
#define __swp_offset(x)			((x).val >> 12)
#define __swp_entry(type, offset)	((swp_entry_t) {((type) << 4) | \
					((offset) << 12) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

#define pte_page(x)			pfn_to_page(pte_pfn(x))
#define __mk_pte(page_nr, pgprot)	__pte(((page_nr) << PAGE_SHIFT) | \
					pgprot_val(pgprot))
/*
 * CSKY can't do page protection for execute, and considers that the same like
 * read. Also, write permissions imply read permissions. This is the closest
 * we can get by reasonable means..
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				_CACHE_CACHED)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_READ | _CACHE_CACHED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_READ | _CACHE_CACHED)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				_PAGE_GLOBAL | _CACHE_CACHED)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				_CACHE_CACHED)

/*
 * Protection map: __PXXX entries are for private (copy-on-write)
 * mappings, __SXXX for shared ones; the three digits are the
 * read/write/execute request bits. Writes on private mappings get
 * PAGE_COPY (read-only, faulted for COW); shared writes get PAGE_SHARED.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
/* A page of zeros, shared wherever a read-only zero page is needed. */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

extern void load_pgd(unsigned long pg_dir);
extern pte_t invalid_pte_table[PTRS_PER_PTE];

/* No special-page support on this architecture: always false / no-op. */
static inline int pte_special(pte_t pte) { return 0; }
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
/*
 * Install a pte. On CONFIG_CPU_NEED_TLBSYNC cores the cache line is
 * written back first — NOTE(review): presumably so a hardware/refill
 * walker reading the table from memory sees the update; confirm.
 */
static inline void set_pte(pte_t *p, pte_t pte)
{
	*p = pte;
#if defined(CONFIG_CPU_NEED_TLBSYNC)
	dcache_wb_line((u32)p);
#endif
	/* prevent out of order execution */
	smp_mb();
}
#define set_pte_at(mm, addr, ptep, pteval)	set_pte(ptep, pteval)
  104. static inline pte_t *pmd_page_vaddr(pmd_t pmd)
  105. {
  106. unsigned long ptr;
  107. ptr = pmd_val(pmd);
  108. return __va(ptr);
  109. }
  110. #define pmd_phys(pmd) pmd_val(pmd)
/*
 * Install a pmd; mirrors set_pte() including the conditional cache
 * write-back for CONFIG_CPU_NEED_TLBSYNC cores.
 */
static inline void set_pmd(pmd_t *p, pmd_t pmd)
{
	*p = pmd;
#if defined(CONFIG_CPU_NEED_TLBSYNC)
	dcache_wb_line((u32)p);
#endif
	/* prevent speculative execution */
	smp_mb();
}
  120. static inline int pmd_none(pmd_t pmd)
  121. {
  122. return pmd_val(pmd) == __pa(invalid_pte_table);
  123. }
  124. #define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
  125. static inline int pmd_present(pmd_t pmd)
  126. {
  127. return (pmd_val(pmd) != __pa(invalid_pte_table));
  128. }
/* Reset a pmd slot to point back at the shared invalid_pte_table. */
static inline void pmd_clear(pmd_t *p)
{
	pmd_val(*p) = (__pa(invalid_pte_table));
#if defined(CONFIG_CPU_NEED_TLBSYNC)
	dcache_wb_line((u32)p);
#endif
}
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)
{
	return pte.pte_low & _PAGE_READ;
}

static inline int pte_write(pte_t pte)
{
	return (pte).pte_low & _PAGE_WRITE;
}

/*
 * Dirtiness is reported via _PAGE_MODIFIED, not _PAGE_DIRTY — see
 * pte_mkdirty() for how the two bits interact.
 */
static inline int pte_dirty(pte_t pte)
{
	return (pte).pte_low & _PAGE_MODIFIED;
}

static inline int pte_young(pte_t pte)
{
	return (pte).pte_low & _PAGE_ACCESSED;
}
static inline pte_t pte_wrprotect(pte_t pte)
{
	/* Dropping write also drops _PAGE_DIRTY so the next write faults. */
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_DIRTY);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	/* _PAGE_VALID is cleared too; pte_mkyoung() re-sets it. */
	pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_VALID);
	return pte;
}
/*
 * _PAGE_DIRTY / _PAGE_VALID are only set when both the permission bit
 * (_PAGE_WRITE / _PAGE_READ) and the accounting bit (_PAGE_MODIFIED /
 * _PAGE_ACCESSED) are present, so accesses without permission still
 * fault. NOTE(review): this looks like the MIPS-style split between
 * accounting and hardware bits — confirm against abi/pgtable-bits.h.
 */
static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) |= _PAGE_VALID;
	return pte;
}
/* Index helpers for each level; pud/pmd are folded (pgtable-nopmd.h). */
#define __pgd_offset(address)	pgd_index(address)
#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

#define pgd_index(address)	((address) >> PGDIR_SHIFT)
  198. /*
  199. * Macro to make mark a page protection value as "uncacheable". Note
  200. * that "protection" is really a misnomer here as the protection value
  201. * contains the memory attribute bits, dirty bits, and various other
  202. * bits as well.
  203. */
  204. #define pgprot_noncached pgprot_noncached
  205. static inline pgprot_t pgprot_noncached(pgprot_t _prot)
  206. {
  207. unsigned long prot = pgprot_val(_prot);
  208. prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
  209. return __pgprot(prot);
  210. }
  211. /*
  212. * Conversion functions: convert a page and protection to a page entry,
  213. * and a page entry and page directory to the page they refer to.
  214. */
  215. #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
  216. static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
  217. {
  218. return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
  219. (pgprot_val(newprot)));
  220. }
  221. /* to find an entry in a page-table-directory */
  222. static inline pgd_t *pgd_offset(struct mm_struct *mm, unsigned long address)
  223. {
  224. return mm->pgd + pgd_index(address);
  225. }
  226. /* Find an entry in the third-level page table.. */
  227. static inline pte_t *pte_offset(pmd_t *dir, unsigned long address)
  228. {
  229. return (pte_t *) (pmd_page_vaddr(*dir)) +
  230. ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
  231. }
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);
extern void show_jtlb_table(void);

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *pte);

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)	(1)

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do {} while (0)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#include <asm-generic/pgtable.h>

#endif /* __ASM_CSKY_PGTABLE_H */