@@ -45,6 +45,34 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
 	return 0;
 }
 
+static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
+			  unsigned long addr, unsigned long end)
+{
+	unsigned long next;
+
+	for (; addr < end; addr = next) {
+		p4d_t *p4d = p4d_page + p4d_index(addr);
+		pud_t *pud;
+
+		next = (addr & P4D_MASK) + P4D_SIZE;
+		if (next > end)
+			next = end;
+
+		if (p4d_present(*p4d)) {
+			pud = pud_offset(p4d, 0);
+			ident_pud_init(info, pud, addr, next);
+			continue;
+		}
+		pud = (pud_t *)info->alloc_pgt_page(info->context);
+		if (!pud)
+			return -ENOMEM;
+		ident_pud_init(info, pud, addr, next);
+		set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
+	}
+
+	return 0;
+}
+
 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
 			      unsigned long pstart, unsigned long pend)
 {
@@ -55,27 +83,36 @@ int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
 
 	for (; addr < end; addr = next) {
 		pgd_t *pgd = pgd_page + pgd_index(addr);
-		pud_t *pud;
+		p4d_t *p4d;
 
 		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
 		if (next > end)
 			next = end;
 
 		if (pgd_present(*pgd)) {
-			pud = pud_offset(pgd, 0);
-			result = ident_pud_init(info, pud, addr, next);
+			p4d = p4d_offset(pgd, 0);
+			result = ident_p4d_init(info, p4d, addr, next);
 			if (result)
 				return result;
 			continue;
 		}
 
-		pud = (pud_t *)info->alloc_pgt_page(info->context);
-		if (!pud)
+		p4d = (p4d_t *)info->alloc_pgt_page(info->context);
+		if (!p4d)
 			return -ENOMEM;
-		result = ident_pud_init(info, pud, addr, next);
+		result = ident_p4d_init(info, p4d, addr, next);
 		if (result)
 			return result;
-		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
+		if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
+			set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE));
+		} else {
+			/*
+			 * With p4d folded, pgd is equal to p4d.
+			 * The pgd entry has to point to the pud page table in this case.
+			 */
+			pud_t *pud = pud_offset(p4d, 0);
+			set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
+		}
 	}
 
 	return 0;