/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <linux/mem_encrypt.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))
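
/*
 * Background note: UC- (uncached minus) behaves like UC but may be
 * overridden to WC by an MTRR.  The boot_cpu_data.x86 > 3 family check
 * skips the attribute on 386-class CPUs, which predate the cache-control
 * page-table bits.
 */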
/*
 * Macros to add or remove encryption attribute
 */
#define pgprot_encrypted(prot)	__pgprot(__sme_set(pgprot_val(prot)))
#define pgprot_decrypted(prot)	__pgprot(__sme_clr(pgprot_val(prot)))

#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx() ptdump_walk_pgd_level_checkwx()
#else
#define debug_checkwx() do { } while (0)
#endif
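
/*
 * debug_checkwx() is invoked once late in boot; with CONFIG_DEBUG_WX it
 * walks the kernel page tables and warns about any mapping that is both
 * writable and executable.  Without the option it compiles to nothing.
 */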
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

extern pmdval_t early_pmd_flags;

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_P4D_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_p4d
# define set_p4d(p4dp, p4d)		native_set_p4d(p4dp, p4d)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define p4d_clear(p4d)			native_p4d_clear(p4d)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_P4D_FOLDED
#define p4d_val(x)	native_p4d_val(x)
#define __p4d(x)	native_make_p4d(x)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while(0)

#endif	/* CONFIG_PARAVIRT */
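
/*
 * Note: with CONFIG_PARAVIRT the pte/pmd/pud/p4d/pgd accessors above are
 * provided by <asm/paravirt.h> so a hypervisor can intercept page-table
 * updates; the native_* aliases here are the bare-metal fallback.
 */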
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline u32 read_pkru(void)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		return __read_pkru();
	return 0;
}

static inline void write_pkru(u32 pkru)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		__write_pkru(pkru);
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pud_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_DIRTY;
}

static inline int pud_young(pud_t pud)
{
	return pud_flags(pud) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline int p4d_large(p4d_t p4d)
{
	/* No 512 GiB pages yet */
	return 0;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_trans_huge(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}
#endif

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_devmap(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_DEVMAP);
}
#else
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
#endif

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
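
/*
 * A transparent huge page is distinguished from a ZONE_DEVICE mapping by
 * the flag pair above: THP has _PAGE_PSE set and _PAGE_DEVMAP clear,
 * while a devmap entry carries _PAGE_DEVMAP.  An (illustrative)
 * caller-side test for "this PMD maps a huge region":
 *
 *	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
 *		... handle as one large mapping ...
 */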
static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}
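
/*
 * The pte_mk*()/pte_clr*() helpers above are pure value transforms: they
 * take a pte_t and return a new one without touching memory, so they can
 * be composed freely.  An (illustrative) write-protect of a live entry:
 *
 *	pte_t pte = pte_wrprotect(pte_mkclean(*ptep));
 *	set_pte_at(mm, addr, ptep, pte);
 */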
static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
}

static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
	pudval_t v = native_pud_val(pud);

	return __pud(v | set);
}

static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
	pudval_t v = native_pud_val(pud);

	return __pud(v & ~clear);
}

static inline pud_t pud_mkold(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkclean(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_DIRTY);
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mkdevmap(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DEVMAP);
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_PSE);
}

static inline pud_t pud_mkyoung(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mknotpresent(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline int pud_soft_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mksoft_dirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_clear_soft_dirty(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
}

#endif	/* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}
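
/*
 * __supported_pte_mask drops bits the CPU cannot use (e.g. _PAGE_NX when
 * the NX feature is absent).  Swap entries and other non-present entry
 * formats reuse the hardware bits for software state, which is why only
 * pgprots with _PAGE_PRESENT are masked here.
 */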
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
	return __pud(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

	return __pte(val);
}
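
/*
 * _PAGE_CHG_MASK covers the PFN plus the bits that must survive a
 * protection change (cache attributes, accessed/dirty, soft-dirty,
 * special).  An (illustrative) mprotect-style update that keeps the
 * target page but swaps the permissions:
 *
 *	pte_t new = pte_modify(old, vm_get_page_prot(vma->vm_flags));
 */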
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd);

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

	return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))
#define p4d_pgprot(x) __pgprot(p4d_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>
#include <asm/fixmap.h>

static inline int pte_none(pte_t pte)
{
	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
			mm_tlb_flush_pending(mm))
		return true;

	return false;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	unsigned long val = native_pmd_val(pmd);
	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}
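
/*
 * _PAGE_KNL_ERRATUM_MASK works around a Knights Landing erratum: the CPU
 * may set stray Accessed/Dirty bits in an entry that was already cleared,
 * so the "none" tests above have to ignore those bits instead of
 * comparing the raw value against zero.
 */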
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_pfn(pud))

/* Find an entry in the second-level page table. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

#if CONFIG_PGTABLE_LEVELS > 3
static inline int p4d_none(p4d_t p4d)
{
	return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_flags(p4d) & _PAGE_PRESENT;
}

static inline unsigned long p4d_page_vaddr(p4d_t p4d)
{
	return (unsigned long)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))

/* Find an entry in the third-level page table. */
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
}

static inline int p4d_bad(p4d_t p4d)
{
	return (p4d_flags(p4d) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */

static inline unsigned long p4d_index(unsigned long address)
{
	return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
}

#if CONFIG_PGTABLE_LEVELS > 4
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))

/* to find an entry in a page-table-directory. */
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	/*
	 * There is no need to do a workaround for the KNL stray
	 * A/D bit erratum here.  PGDs only point to page tables
	 * except on 32-bit non-PAE which is not supported on
	 * KNL.
	 */
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 4 */
#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
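
/*
 * Taken together, the *_index()/*_offset() helpers walk one address down
 * the whole tree.  An (illustrative) kernel-address lookup:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	p4d_t *p4d = p4d_offset(pgd, addr);
 *	pud_t *pud = pud_offset(p4d, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * with a *_none()/*_bad() check required at each level before descending.
 */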
#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);

extern void memblock_find_dma_reserve(void);

#ifdef CONFIG_X86_64
/* Realmode trampoline initialization. */
extern pgd_t trampoline_pgd_entry;
static inline void __meminit init_trampoline_default(void)
{
	/* Default trampoline pgd value */
	trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
}
# ifdef CONFIG_RANDOMIZE_MEMORY
void __meminit init_trampoline(void);
# else
#  define init_trampoline init_trampoline_default
# endif
#else
static inline void init_trampoline(void) { }
#endif

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
{
	pud_t res = *pudp;

	native_pud_clear(pudp);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	native_set_pud(pudp, pud);
}

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
}
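
/*
 * Note the atomic clear_bit() above: the CPU can set _PAGE_BIT_DIRTY or
 * _PAGE_BIT_ACCESSED in the same word at any time, so a plain
 * read-modify-write of the PTE could lose one of those hardware updates.
 */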
#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);
extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pud_t *pudp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
					    pmd_t *pmdp)
{
	return native_pmdp_get_and_clear(pmdp);
}

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pud_t *pudp)
{
	return native_pudp_get_and_clear(pudp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}

#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}
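
/*
 * Worked example on x86-64 (PAGE_SHIFT = 12, PTE_SHIFT = 9):
 * page_level_shift(PG_LEVEL_4K) = 3 + 1 * 9 = 12, i.e. 4 KB pages;
 * page_level_shift(PG_LEVEL_2M) = 3 + 2 * 9 = 21, i.e. 2 MB pages;
 * page_level_shift(PG_LEVEL_1G) = 3 + 3 * 9 = 30, i.e. 1 GB pages.
 */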
/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}
static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
		unsigned long addr, pud_t *pud)
{
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}
#endif
#endif

#define PKRU_AD_BIT 0x1
#define PKRU_WD_BIT 0x2
#define PKRU_BITS_PER_PKEY 2
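
/*
 * PKRU layout: each of the 16 protection keys owns two bits, AD
 * (access-disable) and WD (write-disable).  Key N sits at bit 2*N, so
 * for pkey 3 the AD bit is bit 6 and the WD bit is bit 7 of PKRU.
 */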
static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
}

static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	/*
	 * Access-disable disables writes too so we need to check
	 * both bits here.
	 */
	return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
}

static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/* ifdef to avoid doing 59-bit shift on 32-bit values */
	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
	return 0;
#endif
}

static inline bool __pkru_allows_pkey(u16 pkey, bool write)
{
	u32 pkru = read_pkru();

	if (!__pkru_allows_read(pkru, pkey))
		return false;
	if (write && !__pkru_allows_write(pkru, pkey))
		return false;

	return true;
}

/*
 * 'pteval' can come from a PTE, PMD or PUD.  We only check
 * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
 * same value on all 3 types.
 */
static inline bool __pte_access_permitted(unsigned long pteval, bool write)
{
	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;

	if (write)
		need_pte_bits |= _PAGE_RW;

	if ((pteval & need_pte_bits) != need_pte_bits)
		return 0;

	return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
}

#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	return __pte_access_permitted(pte_val(pte), write);
}

#define pmd_access_permitted pmd_access_permitted
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
{
	return __pte_access_permitted(pmd_val(pmd), write);
}

#define pud_access_permitted pud_access_permitted
static inline bool pud_access_permitted(pud_t pud, bool write)
{
	return __pte_access_permitted(pud_val(pud), write);
}

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */