/*
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/debugfs.h>
#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
/*
 * The dumper groups pagetable entries of the same type into one, and for
 * that it needs to keep some state when walking, and flush this state
 * when a "break" in the continuity is found.
 */
struct pg_state {
        int level;
        pgprot_t current_prot;
        pgprotval_t effective_prot;
        unsigned long start_address;
        unsigned long current_address;
        const struct addr_marker *marker;
        unsigned long lines;
        bool to_dmesg;
        bool check_wx;
        unsigned long wx_pages;
};

struct addr_marker {
        unsigned long start_address;
        const char *name;
        unsigned long max_lines;
};
/* Address space marker hints */

#ifdef CONFIG_X86_64

enum address_markers_idx {
        USER_SPACE_NR = 0,
        KERNEL_SPACE_NR,
        LOW_KERNEL_NR,
#if defined(CONFIG_MODIFY_LDT_SYSCALL) && defined(CONFIG_X86_5LEVEL)
        LDT_NR,
#endif
        VMALLOC_START_NR,
        VMEMMAP_START_NR,
#ifdef CONFIG_KASAN
        KASAN_SHADOW_START_NR,
        KASAN_SHADOW_END_NR,
#endif
        CPU_ENTRY_AREA_NR,
#if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL)
        LDT_NR,
#endif
#ifdef CONFIG_X86_ESPFIX64
        ESPFIX_START_NR,
#endif
#ifdef CONFIG_EFI
        EFI_END_NR,
#endif
        HIGH_KERNEL_NR,
        MODULES_VADDR_NR,
        MODULES_END_NR,
        FIXADDR_START_NR,
        END_OF_SPACE_NR,
};

static struct addr_marker address_markers[] = {
        [USER_SPACE_NR]         = { 0,                  "User Space" },
        [KERNEL_SPACE_NR]       = { (1UL << 63),        "Kernel Space" },
        [LOW_KERNEL_NR]         = { 0UL,                "Low Kernel Mapping" },
        [VMALLOC_START_NR]      = { 0UL,                "vmalloc() Area" },
        [VMEMMAP_START_NR]      = { 0UL,                "Vmemmap" },
#ifdef CONFIG_KASAN
        /*
         * These fields get initialized with the (dynamic)
         * KASAN_SHADOW_{START,END} values in pt_dump_init().
         */
        [KASAN_SHADOW_START_NR] = { 0UL,                "KASAN shadow" },
        [KASAN_SHADOW_END_NR]   = { 0UL,                "KASAN shadow end" },
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        [LDT_NR]                = { 0UL,                "LDT remap" },
#endif
        [CPU_ENTRY_AREA_NR]     = { CPU_ENTRY_AREA_BASE, "CPU entry Area" },
#ifdef CONFIG_X86_ESPFIX64
        [ESPFIX_START_NR]       = { ESPFIX_BASE_ADDR,   "ESPfix Area", 16 },
#endif
#ifdef CONFIG_EFI
        [EFI_END_NR]            = { EFI_VA_END,         "EFI Runtime Services" },
#endif
        [HIGH_KERNEL_NR]        = { __START_KERNEL_map, "High Kernel Mapping" },
        [MODULES_VADDR_NR]      = { MODULES_VADDR,      "Modules" },
        [MODULES_END_NR]        = { MODULES_END,        "End Modules" },
        [FIXADDR_START_NR]      = { FIXADDR_START,      "Fixmap Area" },
        [END_OF_SPACE_NR]       = { -1,                 NULL }
};
#else /* CONFIG_X86_64 */

enum address_markers_idx {
        USER_SPACE_NR = 0,
        KERNEL_SPACE_NR,
        VMALLOC_START_NR,
        VMALLOC_END_NR,
#ifdef CONFIG_HIGHMEM
        PKMAP_BASE_NR,
#endif
        CPU_ENTRY_AREA_NR,
        FIXADDR_START_NR,
        END_OF_SPACE_NR,
};

static struct addr_marker address_markers[] = {
        [USER_SPACE_NR]         = { 0,                  "User Space" },
        [KERNEL_SPACE_NR]       = { PAGE_OFFSET,        "Kernel Mapping" },
        [VMALLOC_START_NR]      = { 0UL,                "vmalloc() Area" },
        [VMALLOC_END_NR]        = { 0UL,                "vmalloc() End" },
#ifdef CONFIG_HIGHMEM
        [PKMAP_BASE_NR]         = { 0UL,                "Persistent kmap() Area" },
#endif
        [CPU_ENTRY_AREA_NR]     = { 0UL,                "CPU entry area" },
        [FIXADDR_START_NR]      = { 0UL,                "Fixmap area" },
        [END_OF_SPACE_NR]       = { -1,                 NULL }
};

#endif /* !CONFIG_X86_64 */
/* Multipliers for offsets within the PTEs */
#define PTE_LEVEL_MULT (PAGE_SIZE)
#define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT)
#define PUD_LEVEL_MULT (PTRS_PER_PMD * PMD_LEVEL_MULT)
#define P4D_LEVEL_MULT (PTRS_PER_PUD * PUD_LEVEL_MULT)
#define PGD_LEVEL_MULT (PTRS_PER_P4D * P4D_LEVEL_MULT)

#define pt_dump_seq_printf(m, to_dmesg, fmt, args...)           \
({                                                              \
        if (to_dmesg)                                           \
                printk(KERN_INFO fmt, ##args);                  \
        else                                                    \
                if (m)                                          \
                        seq_printf(m, fmt, ##args);             \
})

#define pt_dump_cont_printf(m, to_dmesg, fmt, args...)          \
({                                                              \
        if (to_dmesg)                                           \
                printk(KERN_CONT fmt, ##args);                  \
        else                                                    \
                if (m)                                          \
                        seq_printf(m, fmt, ##args);             \
})
/*
 * Print a readable form of a pgprot_t to the seq_file
 */
static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
{
        pgprotval_t pr = pgprot_val(prot);
        static const char * const level_name[] =
                { "cr3", "pgd", "p4d", "pud", "pmd", "pte" };

        if (!(pr & _PAGE_PRESENT)) {
                /* Not present */
                pt_dump_cont_printf(m, dmsg, "                              ");
        } else {
                if (pr & _PAGE_USER)
                        pt_dump_cont_printf(m, dmsg, "USR ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_RW)
                        pt_dump_cont_printf(m, dmsg, "RW ");
                else
                        pt_dump_cont_printf(m, dmsg, "ro ");
                if (pr & _PAGE_PWT)
                        pt_dump_cont_printf(m, dmsg, "PWT ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_PCD)
                        pt_dump_cont_printf(m, dmsg, "PCD ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");

                /* Bit 7 has a different meaning on level 3 vs 4 */
                if (level <= 4 && pr & _PAGE_PSE)
                        pt_dump_cont_printf(m, dmsg, "PSE ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if ((level == 5 && pr & _PAGE_PAT) ||
                    ((level == 4 || level == 3) && pr & _PAGE_PAT_LARGE))
                        pt_dump_cont_printf(m, dmsg, "PAT ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_GLOBAL)
                        pt_dump_cont_printf(m, dmsg, "GLB ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_NX)
                        pt_dump_cont_printf(m, dmsg, "NX ");
                else
                        pt_dump_cont_printf(m, dmsg, "x  ");
        }
        pt_dump_cont_printf(m, dmsg, "%s\n", level_name[level]);
}
/*
 * On 64 bits, sign-extend the 48-bit address to 64 bits
 */
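/*
 * For example, under 4-level paging (__VIRTUAL_MASK_SHIFT == 47) the
 * shift below is 16, so 0x0000800000000000 sign-extends to the
 * canonical address 0xffff800000000000.
 */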
static unsigned long normalize_addr(unsigned long u)
{
        int shift;

        if (!IS_ENABLED(CONFIG_X86_64))
                return u;

        shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
        return (signed long)(u << shift) >> shift;
}
/*
 * This function gets called on a break in a continuous series
 * of PTE entries; the next one is different so we need to
 * print what we collected so far.
 */
static void note_page(struct seq_file *m, struct pg_state *st,
                      pgprot_t new_prot, pgprotval_t new_eff, int level)
{
        pgprotval_t prot, cur, eff;
        static const char units[] = "BKMGTPE";

        /*
         * If we have a "break" in the series, we need to flush the state that
         * we have now. "break" is either changing perms, levels or
         * address space marker.
         */
        prot = pgprot_val(new_prot);
        cur = pgprot_val(st->current_prot);
        eff = st->effective_prot;

        if (!st->level) {
                /* First entry */
                st->current_prot = new_prot;
                st->effective_prot = new_eff;
                st->level = level;
                st->marker = address_markers;
                st->lines = 0;
                pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
                                   st->marker->name);
        } else if (prot != cur || new_eff != eff || level != st->level ||
                   st->current_address >= st->marker[1].start_address) {
                const char *unit = units;
                unsigned long delta;
                int width = sizeof(unsigned long) * 2;

                if (st->check_wx && (eff & _PAGE_RW) && !(eff & _PAGE_NX)) {
                        WARN_ONCE(1,
                                  "x86/mm: Found insecure W+X mapping at address %p/%pS\n",
                                  (void *)st->start_address,
                                  (void *)st->start_address);
                        st->wx_pages += (st->current_address -
                                         st->start_address) / PAGE_SIZE;
                }

                /*
                 * Now print the actual finished series
                 */
                if (!st->marker->max_lines ||
                    st->lines < st->marker->max_lines) {
                        pt_dump_seq_printf(m, st->to_dmesg,
                                           "0x%0*lx-0x%0*lx   ",
                                           width, st->start_address,
                                           width, st->current_address);
                        delta = st->current_address - st->start_address;
                        while (!(delta & 1023) && unit[1]) {
                                delta >>= 10;
                                unit++;
                        }
                        pt_dump_cont_printf(m, st->to_dmesg, "%9lu%c ",
                                            delta, *unit);
                        printk_prot(m, st->current_prot, st->level,
                                    st->to_dmesg);
                }
                st->lines++;

                /*
                 * We print markers for special areas of address space,
                 * such as the start of vmalloc space etc.
                 * This helps in the interpretation.
                 */
                if (st->current_address >= st->marker[1].start_address) {
                        if (st->marker->max_lines &&
                            st->lines > st->marker->max_lines) {
                                unsigned long nskip =
                                        st->lines - st->marker->max_lines;
                                pt_dump_seq_printf(m, st->to_dmesg,
                                                   "... %lu entr%s skipped ... \n",
                                                   nskip,
                                                   nskip == 1 ? "y" : "ies");
                        }
                        st->marker++;
                        st->lines = 0;
                        pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
                                           st->marker->name);
                }

                st->start_address = st->current_address;
                st->current_prot = new_prot;
                st->effective_prot = new_eff;
                st->level = level;
        }
}
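/*
 * Combine the protections of two levels: USER and RW are only effective
 * if granted at both levels, while NX set at either level makes the
 * whole range non-executable.
 */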
static inline pgprotval_t effective_prot(pgprotval_t prot1, pgprotval_t prot2)
{
        return (prot1 & prot2 & (_PAGE_USER | _PAGE_RW)) |
               ((prot1 | prot2) & _PAGE_NX);
}
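/*
 * Leaf level of the walk: visit every PTE under the given PMD entry.
 * P is the virtual address mapped by the start of this page table.
 */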
static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
                           pgprotval_t eff_in, unsigned long P)
{
        int i;
        pte_t *pte;
        pgprotval_t prot, eff;

        for (i = 0; i < PTRS_PER_PTE; i++) {
                st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT);
                pte = pte_offset_map(&addr, st->current_address);
                prot = pte_flags(*pte);
                eff = effective_prot(eff_in, prot);
                note_page(m, st, __pgprot(prot), eff, 5);
                pte_unmap(pte);
        }
}
#ifdef CONFIG_KASAN

/*
 * This is an optimization for KASAN=y case. Since all kasan page tables
 * eventually point to the kasan_zero_page we could call note_page()
 * right away without walking through lower level page tables. This saves
 * us dozens of seconds (minutes for 5-level config) while checking for
 * W+X mapping or reading kernel_page_tables debugfs file.
 */
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
                                    void *pt)
{
        if (__pa(pt) == __pa(kasan_zero_pmd) ||
            (pgtable_l5_enabled() && __pa(pt) == __pa(kasan_zero_p4d)) ||
            __pa(pt) == __pa(kasan_zero_pud)) {
                pgprotval_t prot = pte_flags(kasan_zero_pte[0]);

                note_page(m, st, __pgprot(prot), 0, 5);
                return true;
        }
        return false;
}
#else
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
                                    void *pt)
{
        return false;
}
#endif
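/*
 * When a level is folded away at compile time (PTRS_PER_* == 1), its
 * walker collapses to a macro that recasts the entry and calls the
 * walker one level down; only unfolded levels get a real loop.
 */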
#if PTRS_PER_PMD > 1

static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr,
                           pgprotval_t eff_in, unsigned long P)
{
        int i;
        pmd_t *start, *pmd_start;
        pgprotval_t prot, eff;

        pmd_start = start = (pmd_t *)pud_page_vaddr(addr);
        for (i = 0; i < PTRS_PER_PMD; i++) {
                st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT);
                if (!pmd_none(*start)) {
                        prot = pmd_flags(*start);
                        eff = effective_prot(eff_in, prot);
                        if (pmd_large(*start) || !pmd_present(*start)) {
                                note_page(m, st, __pgprot(prot), eff, 4);
                        } else if (!kasan_page_table(m, st, pmd_start)) {
                                walk_pte_level(m, st, *start, eff,
                                               P + i * PMD_LEVEL_MULT);
                        }
                } else
                        note_page(m, st, __pgprot(0), 0, 4);
                start++;
        }
}

#else
#define walk_pmd_level(m,s,a,e,p) walk_pte_level(m,s,__pmd(pud_val(a)),e,p)
#define pud_large(a) pmd_large(__pmd(pud_val(a)))
#define pud_none(a) pmd_none(__pmd(pud_val(a)))
#endif
#if PTRS_PER_PUD > 1

static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr,
                           pgprotval_t eff_in, unsigned long P)
{
        int i;
        pud_t *start, *pud_start;
        pgprotval_t prot, eff;
        pud_t *prev_pud = NULL;

        pud_start = start = (pud_t *)p4d_page_vaddr(addr);

        for (i = 0; i < PTRS_PER_PUD; i++) {
                st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
                if (!pud_none(*start)) {
                        prot = pud_flags(*start);
                        eff = effective_prot(eff_in, prot);
                        if (pud_large(*start) || !pud_present(*start)) {
                                note_page(m, st, __pgprot(prot), eff, 3);
                        } else if (!kasan_page_table(m, st, pud_start)) {
                                walk_pmd_level(m, st, *start, eff,
                                               P + i * PUD_LEVEL_MULT);
                        }
                } else
                        note_page(m, st, __pgprot(0), 0, 3);

                prev_pud = start;
                start++;
        }
}

#else
#define walk_pud_level(m,s,a,e,p) walk_pmd_level(m,s,__pud(p4d_val(a)),e,p)
#define p4d_large(a) pud_large(__pud(p4d_val(a)))
#define p4d_none(a) pud_none(__pud(p4d_val(a)))
#endif
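/*
 * p4d folding is decided at boot time when CONFIG_X86_5LEVEL is set,
 * so PTRS_PER_P4D may be a runtime value; the fold is therefore
 * handled with a C check here rather than with the preprocessor.
 */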
static void walk_p4d_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
                           pgprotval_t eff_in, unsigned long P)
{
        int i;
        p4d_t *start, *p4d_start;
        pgprotval_t prot, eff;

        if (PTRS_PER_P4D == 1)
                return walk_pud_level(m, st, __p4d(pgd_val(addr)), eff_in, P);

        p4d_start = start = (p4d_t *)pgd_page_vaddr(addr);

        for (i = 0; i < PTRS_PER_P4D; i++) {
                st->current_address = normalize_addr(P + i * P4D_LEVEL_MULT);
                if (!p4d_none(*start)) {
                        prot = p4d_flags(*start);
                        eff = effective_prot(eff_in, prot);
                        if (p4d_large(*start) || !p4d_present(*start)) {
                                note_page(m, st, __pgprot(prot), eff, 2);
                        } else if (!kasan_page_table(m, st, p4d_start)) {
                                walk_pud_level(m, st, *start, eff,
                                               P + i * P4D_LEVEL_MULT);
                        }
                } else
                        note_page(m, st, __pgprot(0), 0, 2);

                start++;
        }
}
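/*
 * With 5-level paging disabled at runtime the PGD entries are really
 * p4d entries, so route the checks accordingly. The macros are not
 * recursive: the inner pgd_large()/pgd_none() calls expand to the
 * original definitions.
 */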
#define pgd_large(a) (pgtable_l5_enabled() ? pgd_large(a) : p4d_large(__p4d(pgd_val(a))))
#define pgd_none(a)  (pgtable_l5_enabled() ? pgd_none(a) : p4d_none(__p4d(pgd_val(a))))
static inline bool is_hypervisor_range(int idx)
{
#ifdef CONFIG_X86_64
        /*
         * ffff800000000000 - ffff87ffffffffff is reserved for
         * the hypervisor.
         */
        return (idx >= pgd_index(__PAGE_OFFSET) - 16) &&
               (idx < pgd_index(__PAGE_OFFSET));
#else
        return false;
#endif
}
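/*
 * Core of the dump: walk every PGD slot and let note_page() coalesce
 * runs of identically-protected pages. The W+X-check callers pass a
 * NULL seq_file, so nothing is printed while W+X pages are counted.
 */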
static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
                                       bool checkwx, bool dmesg)
{
#ifdef CONFIG_X86_64
        pgd_t *start = (pgd_t *) &init_top_pgt;
#else
        pgd_t *start = swapper_pg_dir;
#endif
        pgprotval_t prot, eff;
        int i;
        struct pg_state st = {};

        if (pgd) {
                start = pgd;
                st.to_dmesg = dmesg;
        }

        st.check_wx = checkwx;
        if (checkwx)
                st.wx_pages = 0;

        for (i = 0; i < PTRS_PER_PGD; i++) {
                st.current_address = normalize_addr(i * PGD_LEVEL_MULT);
                if (!pgd_none(*start) && !is_hypervisor_range(i)) {
                        prot = pgd_flags(*start);
#ifdef CONFIG_X86_PAE
                        eff = _PAGE_USER | _PAGE_RW;
#else
                        eff = prot;
#endif
                        if (pgd_large(*start) || !pgd_present(*start)) {
                                note_page(m, &st, __pgprot(prot), eff, 1);
                        } else {
                                walk_p4d_level(m, &st, *start, eff,
                                               i * PGD_LEVEL_MULT);
                        }
                } else
                        note_page(m, &st, __pgprot(0), 0, 1);

                cond_resched();
                start++;
        }

        /* Flush out the last page */
        st.current_address = normalize_addr(PTRS_PER_PGD*PGD_LEVEL_MULT);
        note_page(m, &st, __pgprot(0), 0, 0);
        if (!checkwx)
                return;
        if (st.wx_pages)
                pr_info("x86/mm: Checked W+X mappings: FAILED, %lu W+X pages found.\n",
                        st.wx_pages);
        else
                pr_info("x86/mm: Checked W+X mappings: passed, no W+X pages found.\n");
}
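/* Dump the given pgd; a caller-supplied pgd is routed to dmesg. */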
void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd)
{
        ptdump_walk_pgd_level_core(m, pgd, false, true);
}
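/*
 * debugfs interface: with page-table isolation enabled, "user" selects
 * the user-space half of the PGD pair.
 */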
void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
        if (user && static_cpu_has(X86_FEATURE_PTI))
                pgd = kernel_to_user_pgdp(pgd);
#endif
        ptdump_walk_pgd_level_core(m, pgd, false, false);
}
EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level_debugfs);
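/*
 * With PTI, the kernel keeps a shadow set of page tables for user
 * mode; check that copy for W+X mappings as well.
 */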
static void ptdump_walk_user_pgd_level_checkwx(void)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
        pgd_t *pgd = (pgd_t *) &init_top_pgt;

        if (!static_cpu_has(X86_FEATURE_PTI))
                return;

        pr_info("x86/mm: Checking user space page tables\n");
        pgd = kernel_to_user_pgdp(pgd);
        ptdump_walk_pgd_level_core(NULL, pgd, true, false);
#endif
}
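/* Entry point for the W+X check of the kernel (and PTI user) tables. */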
void ptdump_walk_pgd_level_checkwx(void)
{
        ptdump_walk_pgd_level_core(NULL, NULL, true, false);
        ptdump_walk_user_pgd_level_checkwx();
}
static int __init pt_dump_init(void)
{
        /*
         * Various markers are not compile-time constants, so assign them
         * here.
         */
#ifdef CONFIG_X86_64
        address_markers[LOW_KERNEL_NR].start_address = PAGE_OFFSET;
        address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
        address_markers[VMEMMAP_START_NR].start_address = VMEMMAP_START;
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
#endif
#ifdef CONFIG_KASAN
        address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
        address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
#endif
#endif
#ifdef CONFIG_X86_32
        address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
        address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
# ifdef CONFIG_HIGHMEM
        address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE;
# endif
        address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
        address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE;
#endif
        return 0;
}
__initcall(pt_dump_init);