pgtable-3level.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_3LEVEL_H
#define _ASM_X86_PGTABLE_3LEVEL_H

#include <asm/atomic64_32.h>

/*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#define pte_ERROR(e) \
        pr_err("%s:%d: bad pte %p(%08lx%08lx)\n", \
               __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e) \
        pr_err("%s:%d: bad pmd %p(%016Lx)\n", \
               __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e) \
        pr_err("%s:%d: bad pgd %p(%016Lx)\n", \
               __FILE__, __LINE__, &(e), pgd_val(e))
/* Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is
 * not possible, use pte_get_and_clear to obtain the old pte
 * value and then use set_pte to update it.  -ben
 */
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
        ptep->pte_high = pte.pte_high;
        smp_wmb();
        ptep->pte_low = pte.pte_low;
}
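
/*
 * Illustrative sketch (not part of this header) of the update pattern the
 * note above asks for when the pte may be live in the hardware.  The
 * generic helpers ptep_get_and_clear()/set_pte_at() and the pte_mkdirty()
 * modification are assumptions made only for the example:
 *
 *   pte_t old, new;
 *
 *   old = ptep_get_and_clear(mm, addr, ptep);   (pte is now not present)
 *   new = pte_mkdirty(old);                     (modify the saved value)
 *   set_pte_at(mm, addr, ptep, new);            (hardware saw it cleared first)
 */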
#define pmd_read_atomic pmd_read_atomic
/*
 * pte_offset_map_lock on 32bit PAE kernels was reading the pmd_t with
 * a "*pmdp" dereference done by gcc. Problem is, in certain places
 * where pte_offset_map_lock is called, concurrent page faults are
 * allowed, if the mmap_sem is held for reading. An example is mincore
 * vs page faults vs MADV_DONTNEED. On the page fault side
 * pmd_populate rightfully does a set_64bit, but if we're reading the
 * pmd_t with a "*pmdp" on the mincore side, an SMP race can happen
 * because gcc will not read the 64bit pmd atomically. To fix
 * this, all places running pte_offset_map_lock() while holding the
 * mmap_sem in read mode shall read the pmdp pointer using this
 * function to know if the pmd is null or not, and in turn to know if
 * they can run pte_offset_map_lock or pmd_trans_huge or other pmd
 * operations.
 *
 * Without THP, if the mmap_sem is held for reading, the pmd can only
 * transition from null to not null while pmd_read_atomic runs. So
 * we can always return atomic pmd values with this function.
 *
 * With THP, if the mmap_sem is held for reading, the pmd can become
 * trans_huge or none or point to a pte (and in turn become "stable")
 * at any time under pmd_read_atomic. We could read it really
 * atomically here with an atomic64_read for the THP enabled case (and
 * it would be a whole lot simpler), but to avoid using cmpxchg8b we
 * only return an atomic pmdval if the low part of the pmdval is later
 * found stable (i.e. pointing to a pte). And we're returning a none
 * pmdval if the low part of the pmd is none. In some cases the high
 * and low part of the pmdval returned may not be consistent if THP is
 * enabled (the low part may point to a previously mapped hugepage,
 * while the high part may point to a more recently mapped hugepage),
 * but pmd_none_or_trans_huge_or_clear_bad() only needs the low part
 * of the pmd to be read atomically to decide if the pmd is unstable
 * or not, with the only exception of when the low part of the pmd is
 * zero, in which case we return a none pmd.
 */
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
        pmdval_t ret;
        u32 *tmp = (u32 *)pmdp;

        ret = (pmdval_t) (*tmp);
        if (ret) {
                /*
                 * If the low part is null, we must not read the high part
                 * or we can end up with a partial pmd.
                 */
                smp_rmb();
                ret |= ((pmdval_t)*(tmp + 1)) << 32;
        }

        return (pmd_t) { ret };
}
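
/*
 * Illustrative sketch (not from this file): the intended calling pattern
 * for lockless readers that only hold the mmap_sem for reading, e.g. the
 * mincore()-style walk mentioned above.  pmd_none()/pmd_trans_huge() and
 * the surrounding walk are generic helpers assumed here only for the
 * example:
 *
 *   pmd_t pmdval = pmd_read_atomic(pmdp);
 *
 *   if (pmd_none(pmdval))
 *           return 0;                     (nothing mapped here)
 *   if (pmd_trans_huge(pmdval))
 *           ...handle the huge pmd...
 *   else
 *           pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
 */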
static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
        set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
        pud.p4d.pgd = pti_set_user_pgtbl(&pudp->p4d.pgd, pud.p4d.pgd);
#endif
        set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
}

/*
 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
 * entry, so clear the bottom half first and enforce ordering with a compiler
 * barrier.
 */
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
                                    pte_t *ptep)
{
        ptep->pte_low = 0;
        smp_wmb();
        ptep->pte_high = 0;
}
static inline void native_pmd_clear(pmd_t *pmd)
{
        u32 *tmp = (u32 *)pmd;
        *tmp = 0;
        smp_wmb();
        *(tmp + 1) = 0;
}

static inline void native_pud_clear(pud_t *pudp)
{
}
static inline void pud_clear(pud_t *pudp)
{
        set_pud(pudp, __pud(0));

        /*
         * According to Intel App note "TLBs, Paging-Structure Caches,
         * and Their Invalidation", April 2007, document 317080-001,
         * section 8.1: in PAE mode we explicitly have to flush the
         * TLB via cr3 if the top-level pgd is changed...
         *
         * Currently all places where pud_clear() is called are either
         * followed by flush_tlb_mm() or don't need a TLB flush (x86_64
         * code or pud_clear_bad()), so we don't need a TLB flush here.
         */
}
#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
        pte_t res;

        res.pte = (pteval_t)arch_atomic64_xchg((atomic64_t *)ptep, 0);

        return res;
}
#else
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif
union split_pmd {
        struct {
                u32 pmd_low;
                u32 pmd_high;
        };
        pmd_t pmd;
};

#ifdef CONFIG_SMP
static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
{
        union split_pmd res, *orig = (union split_pmd *)pmdp;

        /* xchg acts as a barrier before setting of the high bits */
        res.pmd_low = xchg(&orig->pmd_low, 0);
        res.pmd_high = orig->pmd_high;
        orig->pmd_high = 0;

        return res.pmd;
}
#else
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif
#ifndef pmdp_establish
#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
                unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
        pmd_t old;

        /*
         * If pmd has present bit cleared we can get away without expensive
         * cmpxchg64: we can update pmdp half-by-half without racing with
         * anybody.
         */
        if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
                union split_pmd old, new, *ptr;

                ptr = (union split_pmd *)pmdp;

                new.pmd = pmd;

                /* xchg acts as a barrier before setting of the high bits */
                old.pmd_low = xchg(&ptr->pmd_low, new.pmd_low);
                old.pmd_high = ptr->pmd_high;
                ptr->pmd_high = new.pmd_high;
                return old.pmd;
        }

        do {
                old = *pmdp;
        } while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);

        return old;
}
#endif
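
/*
 * Illustrative sketch (not from this file): pmdp_establish() is the building
 * block the generic pmdp_invalidate() uses to atomically install a new pmd
 * while handing back the old one, so dirty/accessed bits set by hardware are
 * not lost.  Roughly (helper names are approximate; older trees spell
 * pmd_mkinvalid() as pmd_mknotpresent()):
 *
 *   old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
 *   flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 *   ...old still carries the dirty/accessed bits of the huge pmd...
 */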
#ifdef CONFIG_SMP
union split_pud {
        struct {
                u32 pud_low;
                u32 pud_high;
        };
        pud_t pud;
};

static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
{
        union split_pud res, *orig = (union split_pud *)pudp;

#ifdef CONFIG_PAGE_TABLE_ISOLATION
        pti_set_user_pgtbl(&pudp->p4d.pgd, __pgd(0));
#endif

        /* xchg acts as a barrier before setting of the high bits */
        res.pud_low = xchg(&orig->pud_low, 0);
        res.pud_high = orig->pud_high;
        orig->pud_high = 0;

        return res.pud;
}
#else
#define native_pudp_get_and_clear(xp) native_local_pudp_get_and_clear(xp)
#endif
/* Encode and de-code a swap entry */
#define SWP_TYPE_BITS           5

#define SWP_OFFSET_FIRST_BIT    (_PAGE_BIT_PROTNONE + 1)

/* We always extract/encode the offset by shifting it all the way up, and then down again */
#define SWP_OFFSET_SHIFT        (SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
#define __swp_type(x)                   (((x).val) & 0x1f)
#define __swp_offset(x)                 ((x).val >> 5)
#define __swp_entry(type, offset)       ((swp_entry_t){(type) | (offset) << 5})

/*
 * Normally, __swp_entry() converts from arch-independent swp_entry_t to
 * arch-dependent swp_entry_t, and __swp_entry_to_pte() just stores the result
 * to pte. But here we have 32bit swp_entry_t and 64bit pte, and need to use the
 * whole 64 bits. Thus, we shift the "real" arch-dependent conversion to
 * __swp_entry_to_pte() through the following helper macro based on 64bit
 * __swp_entry().
 */
#define __swp_pteval_entry(type, offset) ((pteval_t) { \
        (~(pteval_t)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
        | ((pteval_t)(type) << (64 - SWP_TYPE_BITS)) })

#define __swp_entry_to_pte(x)   ((pte_t){ .pte = \
                __swp_pteval_entry(__swp_type(x), __swp_offset(x)) })
/*
 * Analogously, __pte_to_swp_entry() doesn't just extract the arch-dependent
 * swp_entry_t, but also has to convert it from 64bit to the 32bit
 * intermediate representation, using the following macros based on 64bit
 * __swp_type() and __swp_offset().
 */
#define __pteval_swp_type(x) ((unsigned long)((x).pte >> (64 - SWP_TYPE_BITS)))
#define __pteval_swp_offset(x) ((unsigned long)(~((x).pte) << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT))

#define __pte_to_swp_entry(pte) (__swp_entry(__pteval_swp_type(pte), \
                                             __pteval_swp_offset(pte)))
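
/*
 * Illustrative worked example (not from this file), assuming
 * _PAGE_BIT_PROTNONE == _PAGE_BIT_GLOBAL == 8, so SWP_OFFSET_FIRST_BIT == 9
 * and SWP_OFFSET_SHIFT == 14:
 *
 *   the swap type lands in pte bits 63..59,
 *   the swap offset is stored bitwise-inverted in pte bits 58..9,
 *   and bits 8..0 (including _PAGE_PRESENT and _PAGE_PROTNONE) stay clear,
 *   so a swap pte can never look like a present or PROT_NONE pte.
 *
 * For __swp_entry(1, 0x1234):
 *   __swp_entry_to_pte()  ->  pte with the type value 1 at bit 59 and the
 *                             low 50 bits of ~0x1234 in bits 58..9
 *   __pte_to_swp_entry()  ->  recovers type 1 and offset 0x1234, since
 *                             "~((x).pte) << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT"
 *                             undoes both the inversion and the shifts.
 *
 * Storing the offset inverted keeps the physical-address bits of this
 * non-present pte pointing outside valid memory, part of the L1TF
 * mitigation alongside <asm/pgtable-invert.h>.
 */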
#define gup_get_pte gup_get_pte
/*
 * WARNING: only to be used in the get_user_pages_fast() implementation.
 *
 * With get_user_pages_fast(), we walk down the pagetables without taking
 * any locks.  For this we would like to load the pointers atomically,
 * but that is not possible (without expensive cmpxchg8b) on PAE.  What
 * we do have is the guarantee that a PTE will only either go from not
 * present to present, or present to not present or both -- it will not
 * switch to a completely different present page without a TLB flush in
 * between; something that we are blocking by holding interrupts off.
 *
 * Setting ptes from not present to present goes:
 *
 *   ptep->pte_high = h;
 *   smp_wmb();
 *   ptep->pte_low = l;
 *
 * And present to not present goes:
 *
 *   ptep->pte_low = 0;
 *   smp_wmb();
 *   ptep->pte_high = 0;
 *
 * We must ensure here that the load of pte_low sees 'l' iff pte_high
 * sees 'h'.  We load pte_high *after* loading pte_low, which ensures we
 * don't see an older value of pte_high.  *Then* we recheck pte_low,
 * which ensures that we haven't picked up a changed pte high.  We might
 * have gotten rubbish values from pte_low and pte_high, but we are
 * guaranteed that pte_low will not have the present bit set *unless*
 * it is 'l'.  Because get_user_pages_fast() only operates on present ptes
 * we're safe.
 */
static inline pte_t gup_get_pte(pte_t *ptep)
{
        pte_t pte;

        do {
                pte.pte_low = ptep->pte_low;
                smp_rmb();
                pte.pte_high = ptep->pte_high;
                smp_rmb();
        } while (unlikely(pte.pte_low != ptep->pte_low));

        return pte;
}

#include <asm/pgtable-invert.h>

#endif /* _ASM_X86_PGTABLE_3LEVEL_H */