#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H
#ifdef __KERNEL__

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

struct mm_struct;

#endif /* !__ASSEMBLY__ */

#if defined(CONFIG_PPC64)
# include <asm/pgtable-ppc64.h>
#else
# include <asm/pgtable-ppc32.h>
#endif

/*
 * We save the slot number & secondary bit in the second half of the
 * PTE page. We use 8 bytes per PTE entry.
 */
#define PTE_PAGE_HIDX_OFFSET (PTRS_PER_PTE * 8)

#ifndef __ASSEMBLY__

#include <asm/tlbflush.h>

/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }

#ifdef CONFIG_NUMA_BALANCING

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & (_PAGE_PRESENT | _PAGE_NUMA);
}

#define pte_numa pte_numa
static inline int pte_numa(pte_t pte)
{
	return (pte_val(pte) &
		(_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA;
}

#define pte_mknonnuma pte_mknonnuma
static inline pte_t pte_mknonnuma(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_NUMA;
	pte_val(pte) |= _PAGE_PRESENT | _PAGE_ACCESSED;
	return pte;
}

#define pte_mknuma pte_mknuma
static inline pte_t pte_mknuma(pte_t pte)
{
	/*
	 * We should not set _PAGE_NUMA on non-present PTEs. Also clear the
	 * present bit so that hash_page will return 1 and we collect this
	 * as a NUMA fault.
	 */
	if (pte_present(pte)) {
		pte_val(pte) |= _PAGE_NUMA;
		pte_val(pte) &= ~_PAGE_PRESENT;
	} else
		VM_BUG_ON(1);
	return pte;
}

#define ptep_set_numa ptep_set_numa
static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep)
{
	if ((pte_val(*ptep) & _PAGE_PRESENT) == 0)
		VM_BUG_ON(1);

	pte_update(mm, addr, ptep, _PAGE_PRESENT, _PAGE_NUMA, 0);
	return;
}

#define pmd_numa pmd_numa
static inline int pmd_numa(pmd_t pmd)
{
	return pte_numa(pmd_pte(pmd));
}

#define pmdp_set_numa pmdp_set_numa
static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
				 pmd_t *pmdp)
{
	if ((pmd_val(*pmdp) & _PAGE_PRESENT) == 0)
		VM_BUG_ON(1);

	pmd_hugepage_update(mm, addr, pmdp, _PAGE_PRESENT, _PAGE_NUMA);
	return;
}

#define pmd_mknonnuma pmd_mknonnuma
static inline pmd_t pmd_mknonnuma(pmd_t pmd)
{
	return pte_pmd(pte_mknonnuma(pmd_pte(pmd)));
}

#define pmd_mknuma pmd_mknuma
static inline pmd_t pmd_mknuma(pmd_t pmd)
{
	return pte_pmd(pte_mknuma(pmd_pte(pmd)));
}

#else /* CONFIG_NUMA_BALANCING */

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}
#endif /* CONFIG_NUMA_BALANCING */
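
/*
 * Hedged sketch (added for illustration, not part of the original header):
 * the helpers above trade _PAGE_PRESENT against _PAGE_NUMA so that the next
 * access faults and can be accounted as a NUMA hinting fault, while
 * pte_present() still reports the mapping as present to generic code:
 *
 *	pte = pte_mknuma(pte);		_PAGE_NUMA set, _PAGE_PRESENT cleared
 *	...				pte_present(pte) remains true
 *	pte = pte_mknonnuma(pte);	_PAGE_PRESENT | _PAGE_ACCESSED restored
 */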

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) {
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot)); }
static inline unsigned long pte_pfn(pte_t pte)	{
	return pte_val(pte) >> PTE_RPN_SHIFT; }

/* Keep these as macros to avoid include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
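
/*
 * Hedged sketch (added for illustration, not part of the original header):
 * the RPN (pfn) field and the protection bits occupy disjoint parts of the
 * PTE, so a valid pfn survives a round trip through pfn_pte()/pte_pfn():
 *
 *	pte_t pte = pfn_pte(pfn, prot);
 *	BUG_ON(pte_pfn(pte) != pfn);
 */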

/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkspecial(pte_t pte) {
	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
static inline pte_t pte_mkhuge(pte_t pte) {
	return pte; }
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}
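
/*
 * Hedged usage sketch (added for illustration, not part of the original
 * header): pte_modify() is the helper generic code such as mprotect() uses
 * to change the protection of an existing mapping; _PAGE_CHG_MASK keeps the
 * pfn and the accessed/dirty state while the protection bits are replaced.
 * Simplified, real callers clear the PTE first and re-install it afterwards:
 *
 *	pte = pte_modify(pte, vm_get_page_prot(vma->vm_flags));
 */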

/* Insert a PTE, top-level function is out of line. It uses an inline
 * low level function in the respective pgtable-* files
 */
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		       pte_t pte);

/* This low level function performs the actual PTE insertion
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
	 * helper pte_update() which does an atomic update. We need to do that
	 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
	 * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
	 * the hash bits instead (ie, same as the non-SMP case)
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));

#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
	/* Second case is 32-bit with 64-bit PTE.  In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between. This is possible because we take care,
	 * in the hash code, to pre-invalidate if the PTE was already hashed,
	 * which synchronizes us with any concurrent invalidation.
	 * In the percpu case, we also fallback to the simple update preserving
	 * the hash bits
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
#if _PAGE_HASHPTE != 0
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
#endif
	__asm__ __volatile__("\
		stw%U0%X0 %2,%0\n\
		eieio\n\
		stw%U0%X0 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");

#elif defined(CONFIG_PPC_STD_MMU_32)
	/* Third case is 32-bit hash table in UP mode, we need to preserve
	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
	 * translation in the hash yet (done in a subsequent flush_tlb_xxx())
	 * and so we need to keep track that this PTE needs invalidating
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));

#else
	/* Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
	*ptep = pte;
#endif
}
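
/*
 * Hedged note (added for illustration, not part of the original header):
 * generic mm code only calls the out-of-line set_pte_at() above, which is
 * expected to funnel into __set_pte_at() with percpu == 0, e.g.
 *
 *	set_pte_at(mm, addr, ptep, mk_pte(page, vma->vm_page_prot));
 *
 * The percpu == 1 path is reserved for strictly per-CPU mappings such as
 * kmap_atomic, where no other CPU can race with the update.
 */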

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);

/*
 * Macro to mark a page protection value as "uncacheable".
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE))

#define pgprot_cached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT))

#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT | _PAGE_WRITETHRU))

#define pgprot_cached_noncoherent(prot) \
		(__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))

#define pgprot_writecombine pgprot_noncached_wc
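
/*
 * Hedged usage sketch (added for illustration, not part of the original
 * header): drivers typically apply these helpers when mapping MMIO into
 * userspace, e.g. in an mmap() implementation:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
 */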

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address.  Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test.  What should we do here?
 */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
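
/*
 * Hedged note (added for illustration, not part of the original header):
 * the generic fault path invokes this hook right after installing the new
 * PTE, roughly:
 *
 *	set_pte_at(vma->vm_mm, address, ptep, entry);
 *	update_mmu_cache(vma, address, ptep);
 */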

extern int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, unsigned long addr,
		      unsigned long end, int write, struct page **pages, int *nr);

extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		       unsigned long end, int write, struct page **pages, int *nr);

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd)		0
#define has_transparent_hugepage() 0
#endif
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
				 unsigned *shift);

static inline pte_t *lookup_linux_ptep(pgd_t *pgdir, unsigned long hva,
				       unsigned long *pte_sizep)
{
	pte_t *ptep;
	unsigned long ps = *pte_sizep;
	unsigned int shift;

	ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift);
	if (!ptep)
		return NULL;
	if (shift)
		*pte_sizep = 1ul << shift;
	else
		*pte_sizep = PAGE_SIZE;

	if (ps > *pte_sizep)
		return NULL;

	return ptep;
}
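
/*
 * Hedged usage sketch (added for illustration, not part of the original
 * header): a caller that needs both the host PTE and the backing page size,
 * for instance KVM's real-mode MMU code, would use it along these lines
 * (psize is a hypothetical caller variable holding the minimum acceptable
 * size):
 *
 *	unsigned long pte_size = psize;
 *	pte_t *ptep = lookup_linux_ptep(pgdir, hva, &pte_size);
 *	if (!ptep)
 *		...			mapping absent or smaller than psize
 */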

#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGTABLE_H */