@@ -147,6 +147,26 @@ preserve_boot_args:
 	b	__inval_dcache_area		// tail call
 ENDPROC(preserve_boot_args)
 
+/*
+ * Macro to arrange a physical address in a page table entry, taking care of
+ * 52-bit addresses.
+ *
+ * Preserves:	phys
+ * Returns:	pte
+ */
+	.macro	phys_to_pte, phys, pte
+#ifdef CONFIG_ARM64_PA_BITS_52
+	/*
+	 * We assume \phys is 64K aligned and this is guaranteed by only
+	 * supporting this configuration with 64K pages.
+	 */
+	orr	\pte, \phys, \phys, lsr #36
+	and	\pte, \pte, #PTE_ADDR_MASK
+#else
+	mov	\pte, \phys
+#endif
+	.endm
+
 /*
  * Macro to create a table entry to the next page.
  *
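For 52-bit physical addresses with the 64KB granule, the architecture splits a descriptor's output address: OA[47:16] sit in descriptor bits [47:16] and OA[51:48] in bits [15:12]. Since \phys is 64K aligned (bits [15:0] are zero), the single orr with \phys, lsr #36 drops PA[51:48] into bits [15:12], and the and with PTE_ADDR_MASK clears the copies of PA[47:36] that land in bits [11:0]. A minimal C sketch of the same computation; the mask values below illustrate the 64K-granule layout rather than quoting the kernel headers:

	#include <stdint.h>

	/* Illustrative 64K-granule descriptor layout for 52-bit PAs:
	 * OA[47:16] in bits [47:16], OA[51:48] in bits [15:12]. */
	#define PTE_ADDR_LOW	0x0000ffffffff0000ULL	/* bits [47:16] */
	#define PTE_ADDR_HIGH	0x000000000000f000ULL	/* bits [15:12] */
	#define PTE_ADDR_MASK	(PTE_ADDR_LOW | PTE_ADDR_HIGH)

	static uint64_t phys_to_pte(uint64_t phys)
	{
		/* phys >> 36 parks PA[51:48] at bits [15:12]; the 64K
		 * alignment of phys keeps the orr collision-free, and
		 * the mask drops PA[47:36] arriving in bits [11:0]. */
		return (phys | (phys >> 36)) & PTE_ADDR_MASK;
	}
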
@@ -156,14 +176,16 @@ ENDPROC(preserve_boot_args)
  * ptrs:	#imm pointers per table page
  *
  * Preserves:	virt
- * Corrupts:	tmp1, tmp2
+ * Corrupts:	ptrs, tmp1, tmp2
  * Returns:	tbl -> next level table page address
  */
 	.macro	create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
-	lsr	\tmp1, \virt, #\shift
-	and	\tmp1, \tmp1, #\ptrs - 1	// table index
-	add	\tmp2, \tbl, #PAGE_SIZE
+	add	\tmp1, \tbl, #PAGE_SIZE
+	phys_to_pte \tmp1, \tmp2
 	orr	\tmp2, \tmp2, #PMD_TYPE_TABLE	// address of next table and entry type
+	lsr	\tmp1, \virt, #\shift
+	sub	\ptrs, \ptrs, #1
+	and	\tmp1, \tmp1, \ptrs		// table index
 	str	\tmp2, [\tbl, \tmp1, lsl #3]
 	add	\tbl, \tbl, #PAGE_SIZE		// next level table page
 	.endm
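Two changes here: the next-level table address is run through phys_to_pte before the type bits are merged in, and ptrs is demoted from an assemble-time immediate to a register, hence the explicit sub \ptrs, \ptrs, #1 to form the index mask and the new Corrupts: line. The ID map's top-level entry count becomes a boot-time value (idmap_ptrs_per_pgd, further down), so it can no longer be folded into an #\ptrs - 1 immediate. Roughly, in C, reusing the phys_to_pte() sketch above (PAGE_SIZE and PMD_TYPE_TABLE stand in for the kernel's definitions; pointers equal physical addresses here since this code runs with the MMU off):

	static uint64_t *create_table_entry(uint64_t *tbl, uint64_t virt,
					    unsigned int shift, uint64_t ptrs)
	{
		uint64_t next = (uint64_t)tbl + PAGE_SIZE;	/* next level table page */
		uint64_t idx = (virt >> shift) & (ptrs - 1);	/* table index */

		tbl[idx] = phys_to_pte(next) | PMD_TYPE_TABLE;
		return (uint64_t *)next;			/* tbl -> next level */
	}
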
@@ -173,15 +195,17 @@ ENDPROC(preserve_boot_args)
  * block entry in the next level (tbl) for the given virtual address.
  *
  * Preserves:	tbl, next, virt
- * Corrupts:	tmp1, tmp2
+ * Corrupts:	ptrs_per_pgd, tmp1, tmp2
  */
-	.macro	create_pgd_entry, tbl, virt, tmp1, tmp2
-	create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2
+	.macro	create_pgd_entry, tbl, virt, ptrs_per_pgd, tmp1, tmp2
+	create_table_entry \tbl, \virt, PGDIR_SHIFT, \ptrs_per_pgd, \tmp1, \tmp2
 #if SWAPPER_PGTABLE_LEVELS > 3
-	create_table_entry \tbl, \virt, PUD_SHIFT, PTRS_PER_PUD, \tmp1, \tmp2
+	mov	\ptrs_per_pgd, PTRS_PER_PUD
+	create_table_entry \tbl, \virt, PUD_SHIFT, \ptrs_per_pgd, \tmp1, \tmp2
 #endif
 #if SWAPPER_PGTABLE_LEVELS > 2
-	create_table_entry \tbl, \virt, SWAPPER_TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2
+	mov	\ptrs_per_pgd, PTRS_PER_PTE
+	create_table_entry \tbl, \virt, SWAPPER_TABLE_SHIFT, \ptrs_per_pgd, \tmp1, \tmp2
 #endif
 	.endm
 
@@ -190,16 +214,17 @@ ENDPROC(preserve_boot_args)
  * virtual range (inclusive).
  *
  * Preserves:	tbl, flags
- * Corrupts:	phys, start, end, pstate
+ * Corrupts:	phys, start, end, tmp, pstate
  */
-	.macro	create_block_map, tbl, flags, phys, start, end
-	lsr	\phys, \phys, #SWAPPER_BLOCK_SHIFT
+	.macro	create_block_map, tbl, flags, phys, start, end, tmp
 	lsr	\start, \start, #SWAPPER_BLOCK_SHIFT
 	and	\start, \start, #PTRS_PER_PTE - 1	// table index
-	orr	\phys, \flags, \phys, lsl #SWAPPER_BLOCK_SHIFT	// table entry
+	bic	\phys, \phys, #SWAPPER_BLOCK_SIZE - 1
 	lsr	\end, \end, #SWAPPER_BLOCK_SHIFT
 	and	\end, \end, #PTRS_PER_PTE - 1	// table end index
-9999:	str	\phys, [\tbl, \start, lsl #3]	// store the entry
+9999:	phys_to_pte \phys, \tmp
+	orr	\tmp, \tmp, \flags		// table entry
+	str	\tmp, [\tbl, \start, lsl #3]	// store the entry
 	add	\start, \start, #1		// next entry
 	add	\phys, \phys, #SWAPPER_BLOCK_SIZE	// next block
 	cmp	\start, \end
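The old sequence pre-shifted \phys down by SWAPPER_BLOCK_SHIFT and merged it with \flags via a shifted-register orr, which only works while the output address is one contiguous bit field. With PA[51:48] living in PTE bits [15:12], \phys instead stays a full byte address, merely aligned down with bic, and each loop iteration runs it through phys_to_pte (hence the extra tmp operand). In C, the loop is roughly as follows (same caveats as the sketches above; the SWAPPER_* and PTRS_PER_PTE names stand in for the kernel constants):

	static void create_block_map(uint64_t *tbl, uint64_t flags,
				     uint64_t phys, uint64_t start, uint64_t end)
	{
		uint64_t idx  = (start >> SWAPPER_BLOCK_SHIFT) & (PTRS_PER_PTE - 1);
		uint64_t last = (end >> SWAPPER_BLOCK_SHIFT) & (PTRS_PER_PTE - 1);

		phys &= ~(uint64_t)(SWAPPER_BLOCK_SIZE - 1);	/* bic: align to block */
		do {
			tbl[idx++] = phys_to_pte(phys) | flags;	/* store the entry */
			phys += SWAPPER_BLOCK_SIZE;		/* next block */
		} while (idx <= last);			/* range is inclusive */
	}
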
@@ -244,26 +269,13 @@ __create_page_tables:
 	adrp	x0, idmap_pg_dir
 	adrp	x3, __idmap_text_start		// __pa(__idmap_text_start)
 
-#ifndef CONFIG_ARM64_VA_BITS_48
-#define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
-#define EXTRA_PTRS	(1 << (48 - EXTRA_SHIFT))
-
-	/*
-	 * If VA_BITS < 48, it may be too small to allow for an ID mapping to be
-	 * created that covers system RAM if that is located sufficiently high
-	 * in the physical address space. So for the ID map, use an extended
-	 * virtual range in that case, by configuring an additional translation
-	 * level.
-	 * First, we have to verify our assumption that the current value of
-	 * VA_BITS was chosen such that all translation levels are fully
-	 * utilised, and that lowering T0SZ will always result in an additional
-	 * translation level to be configured.
-	 */
-#if VA_BITS != EXTRA_SHIFT
-#error "Mismatch between VA_BITS and page size/number of translation levels"
-#endif
-
 	/*
+	 * VA_BITS may be too small to allow for an ID mapping to be created
+	 * that covers system RAM if that is located sufficiently high in the
+	 * physical address space. So for the ID map, use an extended virtual
+	 * range in that case, and configure an additional translation level
+	 * if needed.
+	 *
 	 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
 	 * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
 	 * this number conveniently equals the number of leading zeroes in
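The clz in the next hunk counts the leading zeroes of __idmap_text_end's physical address, which is exactly the largest T0SZ that still covers it. A worked example with a purely illustrative address:

	__idmap_text_end at PA 0x0000_0080_0000_0000	(highest set bit: 39)
	clz = 64 - 40 = 24, so T0SZ may be at most 24
	T0SZ = 24 gives a 2^(64 - 24) = 2^40 byte input range > 2^39

The cmp against TCR_T0SZ(VA_BITS) then decides whether the default VA size already reaches that far.
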
@@ -272,21 +284,44 @@ __create_page_tables:
 	adrp	x5, __idmap_text_end
 	clz	x5, x5
 	cmp	x5, TCR_T0SZ(VA_BITS)	// default T0SZ small enough?
-	b.ge	1f			// .. then skip additional level
+	b.ge	1f			// .. then skip VA range extension
 
 	adr_l	x6, idmap_t0sz
 	str	x5, [x6]
 	dmb	sy
 	dc	ivac, x6		// Invalidate potentially stale cache line
 
-	create_table_entry x0, x3, EXTRA_SHIFT, EXTRA_PTRS, x5, x6
-1:
+#if (VA_BITS < 48)
+#define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
+#define EXTRA_PTRS	(1 << (PHYS_MASK_SHIFT - EXTRA_SHIFT))
+
+	/*
+	 * If VA_BITS < 48, we have to configure an additional table level.
+	 * First, we have to verify our assumption that the current value of
+	 * VA_BITS was chosen such that all translation levels are fully
+	 * utilised, and that lowering T0SZ will always result in an additional
+	 * translation level to be configured.
+	 */
+#if VA_BITS != EXTRA_SHIFT
+#error "Mismatch between VA_BITS and page size/number of translation levels"
 #endif
 
-	create_pgd_entry x0, x3, x5, x6
+	mov	x4, EXTRA_PTRS
+	create_table_entry x0, x3, EXTRA_SHIFT, x4, x5, x6
+#else
+	/*
+	 * If VA_BITS == 48, we don't have to configure an additional
+	 * translation level, but the top-level table has more entries.
+	 */
+	mov	x4, #1 << (PHYS_MASK_SHIFT - PGDIR_SHIFT)
+	str_l	x4, idmap_ptrs_per_pgd, x5
+#endif
+1:
+	ldr_l	x4, idmap_ptrs_per_pgd
+	create_pgd_entry x0, x3, x4, x5, x6
 	mov	x5, x3				// __pa(__idmap_text_start)
 	adr_l	x6, __idmap_text_end		// __pa(__idmap_text_end)
-	create_block_map x0, x7, x3, x5, x6
+	create_block_map x0, x7, x3, x5, x6, x4
 
 /*
  * Map the kernel image (starting with PHYS_OFFSET).
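To make the EXTRA_SHIFT / EXTRA_PTRS arithmetic above concrete, two standard configurations as worked examples:

	4K pages, VA_BITS = 39 (PGDIR_SHIFT = 30, PHYS_MASK_SHIFT = 48):
		EXTRA_SHIFT = 30 + 12 - 3 = 39 = VA_BITS	(the #error check passes)
		EXTRA_PTRS  = 1 << (48 - 39) = 512 entries
	64K pages, VA_BITS = 42, 52-bit PA (PGDIR_SHIFT = 29):
		EXTRA_SHIFT = 29 + 16 - 3 = 42 = VA_BITS
		EXTRA_PTRS  = 1 << (52 - 42) = 1024 entries

Deriving EXTRA_PTRS from PHYS_MASK_SHIFT instead of the old hard-coded 48 is what lets the extended ID map span a full 52-bit physical address; in the VA_BITS == 48 branch the analogous count, 1 << (PHYS_MASK_SHIFT - PGDIR_SHIFT), is stored in idmap_ptrs_per_pgd so that create_pgd_entry sizes the enlarged top-level table consistently.
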
@@ -294,12 +329,13 @@ __create_page_tables:
 	adrp	x0, swapper_pg_dir
 	mov_q	x5, KIMAGE_VADDR + TEXT_OFFSET	// compile time __va(_text)
 	add	x5, x5, x23			// add KASLR displacement
-	create_pgd_entry x0, x5, x3, x6
+	mov	x4, PTRS_PER_PGD
+	create_pgd_entry x0, x5, x4, x3, x6
 	adrp	x6, _end			// runtime __pa(_end)
 	adrp	x3, _text			// runtime __pa(_text)
 	sub	x6, x6, x3			// _end - _text
 	add	x6, x6, x5			// runtime __va(_end)
-	create_block_map x0, x7, x3, x5, x6
+	create_block_map x0, x7, x3, x5, x6, x4
 
 /*
  * Since the page tables have been populated with non-cacheable
@@ -679,8 +715,10 @@ ENTRY(__enable_mmu)
 	update_early_cpu_boot_status 0, x1, x2
 	adrp	x1, idmap_pg_dir
 	adrp	x2, swapper_pg_dir
-	msr	ttbr0_el1, x1			// load TTBR0
-	msr	ttbr1_el1, x2			// load TTBR1
+	phys_to_ttbr x1, x3
+	phys_to_ttbr x2, x4
+	msr	ttbr0_el1, x3			// load TTBR0
+	msr	ttbr1_el1, x4			// load TTBR1
 	isb
 	msr	sctlr_el1, x0
 	isb
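TTBR0/1_EL1 have their own split format when 52-bit PAs are in use: BADDR[51:48] are held in TTBR bits [5:2], so a raw msr of the table's physical address no longer suffices and the addresses are passed through phys_to_ttbr, a macro defined outside this hunk (added to assembler.h in this series). Under the same assumptions as the earlier sketches, it amounts to:

	#define TTBR_BADDR_MASK_52	0x0000fffffffffffcULL	/* bits [47:2] */

	static uint64_t phys_to_ttbr(uint64_t phys)
	{
		/* phys >> 46 moves PA[51:48] into bits [5:2]; the page
		 * table base is 64K aligned, so the low bits of phys
		 * are already zero and the orr cannot collide. */
		return (phys | (phys >> 46)) & TTBR_BADDR_MASK_52;
	}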
|