@@ -15,6 +15,8 @@
 
 extern struct range pfn_mapped[E820_MAX_ENTRIES];
 
+static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
+
 static int __init map_range(struct range *range)
 {
 	unsigned long start;
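tmp_p4d_table is a scratch p4d-level table: page-aligned so it can be installed as a page-table page, and __initdata since it is only needed while kasan_init() runs. As the last hunk below shows, the p4d page that currently backs KASAN_SHADOW_END is copied into it, giving KASAN a p4d page it owns and can safely edit.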
@@ -30,8 +32,10 @@ static void __init clear_pgds(unsigned long start,
 			unsigned long end)
 {
 	pgd_t *pgd;
+	/* See comment in kasan_init() */
+	unsigned long pgd_end = end & PGDIR_MASK;
 
-	for (; start < end; start += PGDIR_SIZE) {
+	for (; start < pgd_end; start += PGDIR_SIZE) {
 		pgd = pgd_offset_k(start);
 		/*
 		 * With folded p4d, pgd_clear() is nop, use p4d_clear()
@@ -42,29 +46,61 @@ static void __init clear_pgds(unsigned long start,
 		else
 			pgd_clear(pgd);
 	}
+
+	pgd = pgd_offset_k(start);
+	for (; start < end; start += P4D_SIZE)
+		p4d_clear(p4d_offset(pgd, start));
+}
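clear_pgds() can no longer wipe whole PGD entries across the entire range: with the unified shadow offset, KASAN_SHADOW_END is not PGDIR-aligned in 5-level mode, and its last PGD entry is shared with other kernel mappings. The loop therefore stops at end & PGDIR_MASK and clears the remaining tail one P4D entry at a time, leaving the rest of the shared PGD entry intact. A minimal user-space model of that split, with toy constants standing in for the kernel's (the hypothetical range and the counting are illustration only):

	#include <stdio.h>

	#define PGDIR_SHIFT	48		/* toy: 5-level x86-64 values */
	#define P4D_SHIFT	39
	#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
	#define P4D_SIZE	(1UL << P4D_SHIFT)
	#define PGDIR_MASK	(~(PGDIR_SIZE - 1))

	int main(void)
	{
		/* Hypothetical range whose end is not PGDIR-aligned. */
		unsigned long start = 2UL * PGDIR_SIZE;
		unsigned long end = 4UL * PGDIR_SIZE + 3UL * P4D_SIZE;
		unsigned long pgd_end = end & PGDIR_MASK;
		unsigned long addr, n_pgd = 0, n_p4d = 0;

		for (addr = start; addr < pgd_end; addr += PGDIR_SIZE)
			n_pgd++;	/* whole PGD entries: safe to clear */
		for (; addr < end; addr += P4D_SIZE)
			n_p4d++;	/* tail inside the shared PGD entry */

		/* prints: 2 PGD entries, then 3 P4D entries */
		printf("%lu PGD entries, then %lu P4D entries\n", n_pgd, n_p4d);
		return 0;
	}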
+
+static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
+{
+	unsigned long p4d;
+
+	if (!IS_ENABLED(CONFIG_X86_5LEVEL))
+		return (p4d_t *)pgd;
+
+	p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
+	p4d += __START_KERNEL_map - phys_base;
+	return (p4d_t *)p4d + p4d_index(addr);
+}
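early_p4d_offset() is a hand-rolled stand-in for p4d_offset() that works this early in boot, presumably because the generic helper translates through the direct mapping, which is not usable yet. The physical address held in the PGD entry is instead relocated into the kernel-image mapping: phys + __START_KERNEL_map - phys_base. With 4-level paging the p4d level is folded into the pgd, so the pgd pointer itself is returned. A toy model of just the translation step (all constants and values below are made up for illustration):

	#include <stdio.h>
	#include <stdint.h>

	#define PTE_PFN_MASK		0x000ffffffffff000ULL	/* page-frame bits */
	#define __START_KERNEL_map	0xffffffff80000000ULL	/* image virt base */

	int main(void)
	{
		uint64_t phys_base = 0x1000000;	/* physical load address of the kernel */
		uint64_t pgd_entry = 0x2346067;	/* phys addr of a p4d table | flags */

		/* Strip the flag bits, then relocate into the kernel-image mapping. */
		uint64_t p4d_phys = pgd_entry & PTE_PFN_MASK;
		uint64_t p4d_virt = p4d_phys + __START_KERNEL_map - phys_base;

		printf("p4d table: phys %#llx -> virt %#llx\n",
		       (unsigned long long)p4d_phys, (unsigned long long)p4d_virt);
		return 0;
	}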
+
+static void __init kasan_early_p4d_populate(pgd_t *pgd,
+		unsigned long addr,
+		unsigned long end)
+{
+	pgd_t pgd_entry;
+	p4d_t *p4d, p4d_entry;
+	unsigned long next;
+
+	if (pgd_none(*pgd)) {
+		pgd_entry = __pgd(_KERNPG_TABLE | __pa_nodebug(kasan_zero_p4d));
+		set_pgd(pgd, pgd_entry);
+	}
+
+	p4d = early_p4d_offset(pgd, addr);
+	do {
+		next = p4d_addr_end(addr, end);
+
+		if (!p4d_none(*p4d))
+			continue;
+
+		p4d_entry = __p4d(_KERNPG_TABLE | __pa_nodebug(kasan_zero_pud));
+		set_p4d(p4d, p4d_entry);
+	} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
 }
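kasan_early_p4d_populate() first hangs the shared kasan_zero_p4d page off an empty PGD entry, then walks the p4d level with p4d_addr_end(), pointing every empty entry at kasan_zero_pud. With 4-level paging, early_p4d_offset() returns the pgd itself and p4d_addr_end() clamps to end, so the loop body runs exactly once on the folded entry. The extra p4d_none() test in the loop condition also ends the walk as soon as it reaches an entry that is already populated.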
 
 static void __init kasan_map_early_shadow(pgd_t *pgd)
 {
-	int i;
-	unsigned long start = KASAN_SHADOW_START;
+	/* See comment in kasan_init() */
+	unsigned long addr = KASAN_SHADOW_START & PGDIR_MASK;
 	unsigned long end = KASAN_SHADOW_END;
+	unsigned long next;
 
-	for (i = pgd_index(start); start < end; i++) {
-		switch (CONFIG_PGTABLE_LEVELS) {
-		case 4:
-			pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud) |
-					_KERNPG_TABLE);
-			break;
-		case 5:
-			pgd[i] = __pgd(__pa_nodebug(kasan_zero_p4d) |
-					_KERNPG_TABLE);
-			break;
-		default:
-			BUILD_BUG();
-		}
-		start += PGDIR_SIZE;
-	}
+	pgd += pgd_index(addr);
+	do {
+		next = pgd_addr_end(addr, end);
+		kasan_early_p4d_populate(pgd, addr, next);
+	} while (pgd++, addr = next, addr != end);
 }
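kasan_map_early_shadow() no longer switches on CONFIG_PGTABLE_LEVELS to write raw pgd entries; it walks the range with pgd_addr_end() and lets kasan_early_p4d_populate() handle both paging modes. The walk is the standard page-table idiom used throughout the kernel; a standalone sketch of its shape, with a toy entry size in place of the real PGDIR_SIZE:

	#include <stdio.h>

	#define PGDIR_SIZE	(1UL << 12)	/* toy entry size: 4 KiB */
	#define PGDIR_MASK	(~(PGDIR_SIZE - 1))

	/* Same shape as the kernel's pgd_addr_end(): end of the current
	 * entry's coverage, clamped to the end of the requested range. */
	static unsigned long pgd_addr_end(unsigned long addr, unsigned long end)
	{
		unsigned long boundary = (addr + PGDIR_SIZE) & PGDIR_MASK;
		return boundary - 1 < end - 1 ? boundary : end;
	}

	int main(void)
	{
		unsigned long addr = 0x0800, end = 0x3200, next;

		do {
			next = pgd_addr_end(addr, end);
			printf("populate [%#lx, %#lx)\n", addr, next);
		} while (addr = next, addr != end);
		return 0;
	}

The first and last chunks it prints are partial entries; everything in between covers one full entry each, which is exactly why the per-entry callee only ever needs to handle one PGD's worth of range.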
 
 #ifdef CONFIG_KASAN_INLINE
@@ -101,7 +137,7 @@ void __init kasan_early_init(void)
 	for (i = 0; i < PTRS_PER_PUD; i++)
 		kasan_zero_pud[i] = __pud(pud_val);
 
-	for (i = 0; CONFIG_PGTABLE_LEVELS >= 5 && i < PTRS_PER_P4D; i++)
+	for (i = 0; IS_ENABLED(CONFIG_X86_5LEVEL) && i < PTRS_PER_P4D; i++)
 		kasan_zero_p4d[i] = __p4d(p4d_val);
 
 	kasan_map_early_shadow(early_top_pgt);
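The guard for filling kasan_zero_p4d changes from CONFIG_PGTABLE_LEVELS >= 5 to IS_ENABLED(CONFIG_X86_5LEVEL). Both are compile-time constants, so the loop is still compiled out entirely on 4-level kernels; IS_ENABLED() simply matches the option the rest of the patch tests.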
@@ -117,12 +153,35 @@ void __init kasan_init(void)
 #endif
 
 	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));
+
+	/*
+	 * We use the same shadow offset for 4- and 5-level paging to
+	 * facilitate boot-time switching between paging modes.
+	 * As a result, in 5-level paging mode KASAN_SHADOW_START and
+	 * KASAN_SHADOW_END are not aligned to the PGD boundary.
+	 *
+	 * KASAN_SHADOW_START doesn't share a PGD with anything else.
+	 * We claim the whole PGD entry to make things easier.
+	 *
+	 * KASAN_SHADOW_END lands in the last PGD entry and collides with
+	 * a bunch of things: kernel code, modules, the EFI mapping, etc.
+	 * We need to take extra steps to avoid overwriting them.
+	 */
+	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
+		void *ptr;
+
+		ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
+		memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table));
+		set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)],
+				__pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE));
+	}
+
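This is the extra step the comment promises: the last PGD entry covering KASAN_SHADOW_END already maps kernel text, modules, and the EFI ranges, so early_top_pgt cannot simply point it at KASAN's tables. Instead, the live p4d page behind that entry is copied into tmp_p4d_table and the copy is installed, so KASAN may edit its own p4d page without disturbing the original referenced by init_top_pgt.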
 	load_cr3(early_top_pgt);
 	__flush_tlb_all();
 
-	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
+	clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);
 
-	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
+	kasan_populate_zero_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
 			kasan_mem_to_shadow((void *)PAGE_OFFSET));
 
 	for (i = 0; i < E820_MAX_ENTRIES; i++) {
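Finally, both clear_pgds() and the first kasan_populate_zero_shadow() call now start from KASAN_SHADOW_START & PGDIR_MASK rather than KASAN_SHADOW_START itself: per the comment above, the first shadow PGD entry is not shared with anything, so rounding down and claiming the whole entry keeps the clearing and population logic simple.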