@@ -53,6 +53,10 @@ u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
 unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
 EXPORT_SYMBOL(empty_zero_page);
 
+static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
+static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
+static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
+
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                               unsigned long size, pgprot_t vma_prot)
 {
@@ -380,16 +384,15 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt,
 
 static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
 {
-
         unsigned long kernel_start = __pa(_stext);
-        unsigned long kernel_end = __pa(_end);
+        unsigned long kernel_end = __pa(_etext);
 
         /*
-         * The kernel itself is mapped at page granularity. Map all other
-         * memory, making sure we don't overwrite the existing kernel mappings.
+         * Take care not to create a writable alias for the
+         * read-only text and rodata sections of the kernel image.
          */
 
-        /* No overlap with the kernel. */
+        /* No overlap with the kernel text */
         if (end < kernel_start || start >= kernel_end) {
                 __create_pgd_mapping(pgd, start, __phys_to_virt(start),
                                      end - start, PAGE_KERNEL,
@@ -398,8 +401,8 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end
         }
 
         /*
-         * This block overlaps the kernel mapping. Map the portion(s) which
-         * don't overlap.
+         * This block overlaps the kernel text mapping.
+         * Map the portion(s) which don't overlap.
          */
         if (start < kernel_start)
                 __create_pgd_mapping(pgd, start,
@@ -411,6 +414,16 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end
                                      __phys_to_virt(kernel_end),
                                      end - kernel_end, PAGE_KERNEL,
                                      early_pgtable_alloc);
+
+        /*
+         * Map the linear alias of the [_stext, _etext) interval as
+         * read-only/non-executable. This makes the contents of the
+         * region accessible to subsystems such as hibernate, but
+         * protects it from inadvertent modification or execution.
+         */
+        __create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start),
+                             kernel_end - kernel_start, PAGE_KERNEL_RO,
+                             early_pgtable_alloc);
 }
 
 static void __init map_mem(pgd_t *pgd)
@@ -431,25 +444,28 @@ static void __init map_mem(pgd_t *pgd)
         }
 }
 
-#ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void)
 {
+        if (!IS_ENABLED(CONFIG_DEBUG_RODATA))
+                return;
+
         create_mapping_late(__pa(_stext), (unsigned long)_stext,
                             (unsigned long)_etext - (unsigned long)_stext,
                             PAGE_KERNEL_ROX);
-
 }
-#endif
 
 void fixup_init(void)
 {
-        create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
-                            (unsigned long)__init_end - (unsigned long)__init_begin,
-                            PAGE_KERNEL);
+        /*
+         * Unmap the __init region but leave the VM area in place. This
+         * prevents the region from being reused for kernel modules, which
+         * is not supported by kallsyms.
+         */
+        unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
 }
 
 static void __init map_kernel_chunk(pgd_t *pgd, void *va_start, void *va_end,
-                                    pgprot_t prot)
+                                    pgprot_t prot, struct vm_struct *vma)
 {
         phys_addr_t pa_start = __pa(va_start);
         unsigned long size = va_end - va_start;
@@ -459,6 +475,14 @@ static void __init map_kernel_chunk(pgd_t *pgd, void *va_start, void *va_end,
 
         __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
                              early_pgtable_alloc);
+
+        vma->addr       = va_start;
+        vma->phys_addr  = pa_start;
+        vma->size       = size;
+        vma->flags      = VM_MAP;
+        vma->caller     = __builtin_return_address(0);
+
+        vm_area_add_early(vma);
 }
 
 /*
@@ -466,17 +490,35 @@ static void __init map_kernel_chunk(pgd_t *pgd, void *va_start, void *va_end,
  */
 static void __init map_kernel(pgd_t *pgd)
 {
+        static struct vm_struct vmlinux_text, vmlinux_init, vmlinux_data;
 
-        map_kernel_chunk(pgd, _stext, _etext, PAGE_KERNEL_EXEC);
-        map_kernel_chunk(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC);
-        map_kernel_chunk(pgd, _data, _end, PAGE_KERNEL);
+        map_kernel_chunk(pgd, _stext, _etext, PAGE_KERNEL_EXEC, &vmlinux_text);
+        map_kernel_chunk(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC,
+                         &vmlinux_init);
+        map_kernel_chunk(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data);
 
-        /*
-         * The fixmap falls in a separate pgd to the kernel, and doesn't live
-         * in the carveout for the swapper_pg_dir. We can simply re-use the
-         * existing dir for the fixmap.
-         */
-        set_pgd(pgd_offset_raw(pgd, FIXADDR_START), *pgd_offset_k(FIXADDR_START));
+        if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
+                /*
+                 * The fixmap falls in a separate pgd to the kernel, and doesn't
+                 * live in the carveout for the swapper_pg_dir. We can simply
+                 * re-use the existing dir for the fixmap.
+                 */
+                set_pgd(pgd_offset_raw(pgd, FIXADDR_START),
+                        *pgd_offset_k(FIXADDR_START));
+        } else if (CONFIG_PGTABLE_LEVELS > 3) {
+                /*
+                 * The fixmap shares its top level pgd entry with the kernel
+                 * mapping. This can really only occur when we are running
+                 * with 16k/4 levels, so we can simply reuse the pud level
+                 * entry instead.
+                 */
+                BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
+                set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START),
+                        __pud(__pa(bm_pmd) | PUD_TYPE_TABLE));
+                pud_clear_fixmap();
+        } else {
+                BUG();
+        }
 
         kasan_copy_shadow(pgd);
 }
@@ -602,14 +644,6 @@ void vmemmap_free(unsigned long start, unsigned long end)
 }
 #endif  /* CONFIG_SPARSEMEM_VMEMMAP */
 
-static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
-#if CONFIG_PGTABLE_LEVELS > 2
-static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
-#endif
-#if CONFIG_PGTABLE_LEVELS > 3
-static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
-#endif
-
 static inline pud_t * fixmap_pud(unsigned long addr)
 {
         pgd_t *pgd = pgd_offset_k(addr);
@@ -641,8 +675,18 @@ void __init early_fixmap_init(void)
         unsigned long addr = FIXADDR_START;
 
         pgd = pgd_offset_k(addr);
-        pgd_populate(&init_mm, pgd, bm_pud);
-        pud = fixmap_pud(addr);
+        if (CONFIG_PGTABLE_LEVELS > 3 && !pgd_none(*pgd)) {
+                /*
+                 * We only end up here if the kernel mapping and the fixmap
+                 * share the top level pgd entry, which should only happen on
+                 * 16k/4 levels configurations.
+                 */
+                BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
+                pud = pud_offset_kimg(pgd, addr);
+        } else {
+                pgd_populate(&init_mm, pgd, bm_pud);
+                pud = fixmap_pud(addr);
+        }
         pud_populate(&init_mm, pud, bm_pmd);
         pmd = fixmap_pmd(addr);
         pmd_populate_kernel(&init_mm, pmd, bm_pte);
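
The splitting logic that the __map_memblock() hunks introduce can be hard to follow in diff form. The following standalone sketch (illustrative only, not part of the patch; map_region() and map() are made-up stand-ins for __map_memblock() and __create_pgd_mapping()) reduces it to plain interval arithmetic:

/*
 * Illustrative sketch only -- not part of the patch above. map_region()
 * and map() are hypothetical names; the interval logic mirrors the
 * patched __map_memblock().
 */
#include <stdio.h>

typedef unsigned long long u64;

static void map(u64 start, u64 end, const char *prot)
{
        printf("map [%#llx, %#llx) as %s\n", start, end, prot);
}

static void map_region(u64 start, u64 end, u64 kernel_start, u64 kernel_end)
{
        /* No overlap with the kernel text: map the whole region RW. */
        if (end < kernel_start || start >= kernel_end) {
                map(start, end, "PAGE_KERNEL");
                return;
        }

        /* Portions below and above [_stext, _etext) stay RW... */
        if (start < kernel_start)
                map(start, kernel_start, "PAGE_KERNEL");
        if (kernel_end < end)
                map(kernel_end, end, "PAGE_KERNEL");

        /* ...while the linear alias of the text itself becomes RO/NX. */
        map(kernel_start, kernel_end, "PAGE_KERNEL_RO");
}

int main(void)
{
        /* Example: a 16 MiB region containing a 2 MiB kernel text interval. */
        map_region(0x40000000ULL, 0x41000000ULL, 0x40080000ULL, 0x40280000ULL);
        return 0;
}

Note that, as in the patch, the read-only alias covers the whole [_stext, _etext) interval once any overlap is detected, which is sound because in practice the kernel image sits inside a single memblock region.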
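Similarly, the mark_rodata_ro() hunk trades an #ifdef for IS_ENABLED(). A minimal userspace stand-in (an assumption for illustration: config symbols are taken to expand to 0 or 1 here, whereas the kernel's real IS_ENABLED() also copes with undefined symbols) shows why this keeps the disabled path compiled but dead:

/*
 * Illustrative sketch only -- not part of the patch above. Unlike
 * #ifdef, the body guarded by IS_ENABLED() is always parsed and
 * type-checked, then discarded by the compiler as dead code when the
 * option is off.
 */
#include <stdio.h>

#define CONFIG_DEBUG_RODATA 0   /* flip to 1 to "enable" the option */
#define IS_ENABLED(option) (option)

static void mark_rodata_ro(void)
{
        if (!IS_ENABLED(CONFIG_DEBUG_RODATA))
                return; /* compiles to an empty function when disabled */

        puts("remap [_stext, _etext) as PAGE_KERNEL_ROX");
}

int main(void)
{
        mark_rodata_ro();       /* prints nothing with the option off */
        return 0;
}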