@@ -357,6 +357,47 @@ const struct mem_type *get_mem_type(unsigned int type)
 }
 EXPORT_SYMBOL(get_mem_type);
 
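+/*
+ * Fixmap pte lookups are indirected through this pointer: early boot
+ * resolves them in the static bm_pte[] table below, and once the real
+ * kernel page tables exist the pointer is switched to the late variant
+ * that walks them via pte_offset_kernel().
+ */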
+static pte_t *(*pte_offset_fixmap)(pmd_t *dir, unsigned long addr);
+
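+/* Static pte table backing the fixmap slots while the early variant is in use */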
+static pte_t bm_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
+	__aligned(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE) __initdata;
+
+static pte_t * __init pte_offset_early_fixmap(pmd_t *dir, unsigned long addr)
+{
+	return &bm_pte[pte_index(addr)];
+}
+
+static pte_t *pte_offset_late_fixmap(pmd_t *dir, unsigned long addr)
+{
+	return pte_offset_kernel(dir, addr);
+}
+
+static inline pmd_t * __init fixmap_pmd(unsigned long addr)
+{
+	pgd_t *pgd = pgd_offset_k(addr);
+	pud_t *pud = pud_offset(pgd, addr);
+	pmd_t *pmd = pmd_offset(pud, addr);
+
+	return pmd;
+}
+
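+/*
+ * Install bm_pte[] as the pte table for the pmd covering the fixmap
+ * region, so that __set_fixmap() already works before paging_init()
+ * has built the real kernel page tables.
+ */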
+void __init early_fixmap_init(void)
+{
+	pmd_t *pmd;
+
+	/*
+	 * The early fixmap range spans multiple pmds, for which
+	 * we are not prepared:
+	 */
+	BUILD_BUG_ON((__fix_to_virt(__end_of_permanent_fixed_addresses) >> PMD_SHIFT)
+		     != FIXADDR_TOP >> PMD_SHIFT);
+
+	pmd = fixmap_pmd(FIXADDR_TOP);
+	pmd_populate_kernel(&init_mm, pmd, bm_pte);
+
+	pte_offset_fixmap = pte_offset_early_fixmap;
+}
+
 /*
  * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
  * As a result, this can only be called with preemption disabled, as under
@@ -365,7 +406,7 @@ EXPORT_SYMBOL(get_mem_type);
 void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
 {
 	unsigned long vaddr = __fix_to_virt(idx);
-	pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+	pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);
 
 	/* Make sure fixmap region does not exceed available allocation. */
 	BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
@@ -855,7 +896,7 @@ static void __init create_mapping(struct map_desc *md)
 	}
 
 	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
-	    md->virtual >= PAGE_OFFSET &&
+	    md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
 	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
 		pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
 			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
@@ -1213,10 +1254,10 @@ void __init arm_mm_memblock_reserve(void)
 
 /*
  * Set up the device mappings. Since we clear out the page tables for all
- * mappings above VMALLOC_START, we will remove any debug device mappings.
- * This means you have to be careful how you debug this function, or any
- * called function. This means you can't use any function or debugging
- * method which may touch any device, otherwise the kernel _will_ crash.
+ * mappings above VMALLOC_START, except the early fixmap, we might remove
+ * debug device mappings. This means earlycon can be used to debug this
+ * function. Any other function or debugging method which may touch any
+ * device _will_ crash the kernel.
  */
 static void __init devicemaps_init(const struct machine_desc *mdesc)
 {
@@ -1231,7 +1272,10 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
 
 	early_trap_init(vectors);
 
-	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
+	/*
+	 * Clear the page tables, except for the top pmd used by the early fixmaps.
+	 */
+	for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
 		pmd_clear(pmd_off_k(addr));
 
 	/*
@@ -1483,6 +1527,35 @@ void __init early_paging_init(const struct machine_desc *mdesc)
 
 #endif
 
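+/*
+ * Tear down the early fixmap: switch lookups over to the real page
+ * tables, unhook bm_pte[], and replay any live device mappings into
+ * the kernel page tables so fixmap users keep working across the
+ * handover.
+ */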
+static void __init early_fixmap_shutdown(void)
+{
+	int i;
+	unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1);
+
+	pte_offset_fixmap = pte_offset_late_fixmap;
+	pmd_clear(fixmap_pmd(va));
+	local_flush_tlb_kernel_page(va);
+
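+	/*
+	 * Walk the permanent fixmap slots and re-create every mapping
+	 * that was live in bm_pte[] with create_mapping().
+	 */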
+	for (i = 0; i < __end_of_permanent_fixed_addresses; i++) {
+		pte_t *pte;
+		struct map_desc map;
+
+		map.virtual = fix_to_virt(i);
+		pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual);
+
+		/* Only I/O device mappings are supported ATM */
+		if (pte_none(*pte) ||
+		    (pte_val(*pte) & L_PTE_MT_MASK) != L_PTE_MT_DEV_SHARED)
+			continue;
+
+		map.pfn = pte_pfn(*pte);
+		map.type = MT_DEVICE;
+		map.length = PAGE_SIZE;
+
+		create_mapping(&map);
+	}
+}
+
 /*
  * paging_init() sets up the page tables, initialises the zone memory
  * maps, and sets up the zero page, bad page and bad page tables.
@@ -1495,6 +1568,7 @@ void __init paging_init(const struct machine_desc *mdesc)
 	prepare_page_table();
 	map_lowmem();
 	dma_contiguous_remap();
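+	/* Hand the fixmap over from bm_pte[] to the real page tables */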
+	early_fixmap_shutdown();
 	devicemaps_init(mdesc);
 	kmap_init();
 	tcm_init();