@@ -28,8 +28,6 @@
 #include <linux/memblock.h>
 #include <linux/fs.h>
 #include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/stop_machine.h>
 
 #include <asm/barrier.h>
 #include <asm/cputype.h>
@@ -42,6 +40,7 @@
 #include <asm/tlb.h>
 #include <asm/memblock.h>
 #include <asm/mmu_context.h>
+#include <asm/ptdump.h>
 
 u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
 
@@ -95,11 +94,24 @@ static phys_addr_t __init early_pgtable_alloc(void)
 	return phys;
 }
 
+static bool pgattr_change_is_safe(u64 old, u64 new)
+{
+	/*
+	 * The following mapping attributes may be updated in live
+	 * kernel mappings without the need for break-before-make.
+	 */
+	static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE;
+
+	return old == 0 || new == 0 || ((old ^ new) & ~mask) == 0;
+}
+
 static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
 				  unsigned long end, unsigned long pfn,
 				  pgprot_t prot,
-				  phys_addr_t (*pgtable_alloc)(void))
+				  phys_addr_t (*pgtable_alloc)(void),
+				  bool page_mappings_only)
 {
+	pgprot_t __prot = prot;
 	pte_t *pte;
 
 	BUG_ON(pmd_sect(*pmd));
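
The new pgattr_change_is_safe() helper encodes the architecture's
break-before-make rule: a live kernel mapping may have its permission
attributes (PXN, RDONLY, WRITE) changed in place, but any other change,
such as a different memory type or output address, requires the old
entry to be invalidated and the TLB flushed first. A minimal user-space
sketch of the same check, with the descriptor bit positions written out
as assumed constants (the authoritative definitions live in
arch/arm64/include/asm/pgtable-hwdef.h):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed arm64 descriptor bits, for illustration only. */
#define PTE_VALID	(1ULL << 0)
#define PTE_RDONLY	(1ULL << 7)		/* AP[2]: read-only */
#define PTE_WRITE	(1ULL << 51)		/* DBM: writeable */
#define PTE_PXN		(1ULL << 53)		/* privileged execute-never */
#define PTE_ATTRINDX(t)	((uint64_t)(t) << 2)	/* MAIR memory-type index */

static bool pgattr_change_is_safe(uint64_t old, uint64_t new)
{
	/* Only these attributes may change on a live entry. */
	static const uint64_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE;

	return old == 0 || new == 0 || ((old ^ new) & ~mask) == 0;
}

int main(void)
{
	uint64_t base = PTE_VALID | PTE_ATTRINDX(4);

	/* RW -> RO only flips permission bits: safe, prints 1. */
	printf("%d\n", pgattr_change_is_safe(base | PTE_WRITE,
					     base | PTE_RDONLY));
	/* Changing the memory type needs break-before-make: prints 0. */
	printf("%d\n", pgattr_change_is_safe(base,
					     PTE_VALID | PTE_ATTRINDX(0)));
	return 0;
}
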
@@ -115,8 +127,28 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
 
 	pte = pte_set_fixmap_offset(pmd, addr);
 	do {
-		set_pte(pte, pfn_pte(pfn, prot));
+		pte_t old_pte = *pte;
+
+		/*
+		 * Set the contiguous bit for the subsequent group of PTEs if
+		 * its size and alignment are appropriate.
+		 */
+		if (((addr | PFN_PHYS(pfn)) & ~CONT_PTE_MASK) == 0) {
+			if (end - addr >= CONT_PTE_SIZE && !page_mappings_only)
+				__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
+			else
+				__prot = prot;
+		}
+
+		set_pte(pte, pfn_pte(pfn, __prot));
 		pfn++;
+
+		/*
+		 * After the PTE entry has been populated once, we
+		 * only allow updates to the permission attributes.
+		 */
+		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte), pte_val(*pte)));
+
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 
 	pte_clear_fixmap();
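
The contiguous bit (PTE_CONT) hints to the TLB that a naturally aligned
group of entries (16 with a 4KiB granule) maps a contiguous, uniformly
attributed range, so the whole group can share a single TLB entry. Note
that the hunk above only re-evaluates __prot at group-aligned
boundaries; the following 15 PTEs inherit whatever was chosen there, so
a group is either all-contiguous or all-regular. A sketch of the
group-boundary eligibility test, assuming a 4KiB granule (constants
written out rather than taken from the kernel headers):

#include <stdbool.h>
#include <stdint.h>

/* Assumed 4KiB granule: 16 contiguous PTEs span 64KiB. */
#define PAGE_SHIFT	12
#define CONT_PTES	16
#define CONT_PTE_SIZE	((uint64_t)CONT_PTES << PAGE_SHIFT)
#define CONT_PTE_MASK	(~(CONT_PTE_SIZE - 1))

/* Mirrors the group-boundary test in alloc_init_pte() above. */
static bool may_use_cont_pte(uint64_t addr, uint64_t phys, uint64_t end,
			     bool page_mappings_only)
{
	/* VA and PA must share 64KiB alignment... */
	if (((addr | phys) & ~CONT_PTE_MASK) != 0)
		return false;
	/* ...and a full group must fit in the remaining range. */
	return end - addr >= CONT_PTE_SIZE && !page_mappings_only;
}
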
@@ -125,8 +157,9 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
 static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
 			   phys_addr_t phys, pgprot_t prot,
 			   phys_addr_t (*pgtable_alloc)(void),
-			   bool allow_block_mappings)
+			   bool page_mappings_only)
 {
+	pgprot_t __prot = prot;
 	pmd_t *pmd;
 	unsigned long next;
 
@@ -146,27 +179,39 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
 
 	pmd = pmd_set_fixmap_offset(pud, addr);
 	do {
+		pmd_t old_pmd = *pmd;
+
 		next = pmd_addr_end(addr, end);
+
 		/* try section mapping first */
 		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
-		      allow_block_mappings) {
-			pmd_t old_pmd =*pmd;
-			pmd_set_huge(pmd, phys, prot);
+		    !page_mappings_only) {
 			/*
-			 * Check for previous table entries created during
-			 * boot (__create_page_tables) and flush them.
+			 * Set the contiguous bit for the subsequent group of
+			 * PMDs if its size and alignment are appropriate.
 			 */
-			if (!pmd_none(old_pmd)) {
-				flush_tlb_all();
-				if (pmd_table(old_pmd)) {
-					phys_addr_t table = pmd_page_paddr(old_pmd);
-					if (!WARN_ON_ONCE(slab_is_available()))
-						memblock_free(table, PAGE_SIZE);
-				}
+			if (((addr | phys) & ~CONT_PMD_MASK) == 0) {
+				if (end - addr >= CONT_PMD_SIZE)
+					__prot = __pgprot(pgprot_val(prot) |
+							  PTE_CONT);
+				else
+					__prot = prot;
 			}
+			pmd_set_huge(pmd, phys, __prot);
+
+			/*
+			 * After the PMD entry has been populated once, we
+			 * only allow updates to the permission attributes.
+			 */
+			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
+						      pmd_val(*pmd)));
 		} else {
 			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
-				       prot, pgtable_alloc);
+				       prot, pgtable_alloc,
+				       page_mappings_only);
+
+			BUG_ON(pmd_val(old_pmd) != 0 &&
+			       pmd_val(old_pmd) != pmd_val(*pmd));
 		}
 		phys += next - addr;
 	} while (pmd++, addr = next, addr != end);
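
At the PMD level the same decision is made one level up: with a 4KiB
granule a 2MiB-aligned chunk gets a section (block) mapping outright,
and a naturally aligned run of 16 sections additionally earns the
contiguous bit. A sketch of the resulting precedence, with the sizes
assumed for a 4KiB granule:

#include <stdbool.h>
#include <stdint.h>

/* Assumed 4KiB-granule geometry. */
#define SECTION_SIZE	(2ULL << 20)		/* 2MiB PMD block */
#define SECTION_MASK	(~(SECTION_SIZE - 1))
#define CONT_PMD_SIZE	(16 * SECTION_SIZE)	/* 32MiB group */
#define CONT_PMD_MASK	(~(CONT_PMD_SIZE - 1))

enum pmd_map { MAP_PTE_TABLE, MAP_SECTION, MAP_CONT_SECTIONS };

/* Mirrors the control flow of alloc_init_pmd() above; next is the end
 * of the current PMD entry, end the end of the whole range. */
static enum pmd_map pmd_mapping_choice(uint64_t addr, uint64_t next,
				       uint64_t end, uint64_t phys,
				       bool page_mappings_only)
{
	/* Fall back to a table of PTEs unless everything is 2MiB aligned. */
	if (((addr | next | phys) & ~SECTION_MASK) != 0 || page_mappings_only)
		return MAP_PTE_TABLE;

	/* 32MiB-aligned runs of sections also get PTE_CONT. */
	if (((addr | phys) & ~CONT_PMD_MASK) == 0 &&
	    end - addr >= CONT_PMD_SIZE)
		return MAP_CONT_SECTIONS;

	return MAP_SECTION;
}

The hunk also drops the old "free the superseded table" path: since a
live entry may now only receive safe attribute updates (enforced by the
BUG_ON calls), a block entry can no longer silently replace a table
entry, so there is no stale table left to flush and free.
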
@@ -189,7 +234,7 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next,
 static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
 			   phys_addr_t phys, pgprot_t prot,
 			   phys_addr_t (*pgtable_alloc)(void),
-			   bool allow_block_mappings)
+			   bool page_mappings_only)
 {
 	pud_t *pud;
 	unsigned long next;
@@ -204,33 +249,28 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
 
 	pud = pud_set_fixmap_offset(pgd, addr);
 	do {
+		pud_t old_pud = *pud;
+
 		next = pud_addr_end(addr, end);
 
 		/*
 		 * For 4K granule only, attempt to put down a 1GB block
 		 */
-		if (use_1G_block(addr, next, phys) && allow_block_mappings) {
-			pud_t old_pud = *pud;
+		if (use_1G_block(addr, next, phys) && !page_mappings_only) {
 			pud_set_huge(pud, phys, prot);
 
 			/*
-			 * If we have an old value for a pud, it will
-			 * be pointing to a pmd table that we no longer
-			 * need (from swapper_pg_dir).
-			 *
-			 * Look up the old pmd table and free it.
+			 * After the PUD entry has been populated once, we
+			 * only allow updates to the permission attributes.
 			 */
-			if (!pud_none(old_pud)) {
-				flush_tlb_all();
-				if (pud_table(old_pud)) {
-					phys_addr_t table = pud_page_paddr(old_pud);
-					if (!WARN_ON_ONCE(slab_is_available()))
-						memblock_free(table, PAGE_SIZE);
-				}
-			}
+			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
+						      pud_val(*pud)));
 		} else {
 			alloc_init_pmd(pud, addr, next, phys, prot,
-				       pgtable_alloc, allow_block_mappings);
+				       pgtable_alloc, page_mappings_only);
+
+			BUG_ON(pud_val(old_pud) != 0 &&
+			       pud_val(old_pud) != pud_val(*pud));
 		}
 		phys += next - addr;
 	} while (pud++, addr = next, addr != end);
@@ -242,7 +282,7 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
 				 unsigned long virt, phys_addr_t size,
 				 pgprot_t prot,
 				 phys_addr_t (*pgtable_alloc)(void),
-				 bool allow_block_mappings)
+				 bool page_mappings_only)
 {
 	unsigned long addr, length, end, next;
 	pgd_t *pgd = pgd_offset_raw(pgdir, virt);
@@ -262,7 +302,7 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
 	do {
 		next = pgd_addr_end(addr, end);
 		alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc,
-			       allow_block_mappings);
+			       page_mappings_only);
 		phys += next - addr;
 	} while (pgd++, addr = next, addr != end);
 }
@@ -291,17 +331,17 @@ static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
 			&phys, virt);
 		return;
 	}
-	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, true);
+	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, false);
 }
 
 void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
 			       unsigned long virt, phys_addr_t size,
-			       pgprot_t prot, bool allow_block_mappings)
+			       pgprot_t prot, bool page_mappings_only)
 {
 	BUG_ON(mm == &init_mm);
 
 	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
-			     pgd_pgtable_alloc, allow_block_mappings);
+			     pgd_pgtable_alloc, page_mappings_only);
 }
 
 static void create_mapping_late(phys_addr_t phys, unsigned long virt,
@@ -314,7 +354,7 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt,
 	}
 
 	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
-			     NULL, !debug_pagealloc_enabled());
+			     NULL, debug_pagealloc_enabled());
 }
 
 static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
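
Note the polarity flip that accompanies the rename: every caller that
used to pass !debug_pagealloc_enabled() as allow_block_mappings now
passes debug_pagealloc_enabled() as page_mappings_only, so behaviour is
unchanged. When CONFIG_DEBUG_PAGEALLOC is active the kernel must map
everything with individual pages, so that single pages can later be
mapped and unmapped without splitting live block entries. A tiny
self-contained illustration (the stub stands in for the kernel's real
predicate):

#include <stdbool.h>
#include <stdio.h>

/* Stub standing in for the kernel's debug_pagealloc_enabled(). */
static bool debug_pagealloc_enabled(void)
{
	return true;	/* pretend CONFIG_DEBUG_PAGEALLOC is active */
}

int main(void)
{
	bool allow_block_mappings = !debug_pagealloc_enabled();	/* old flag */
	bool page_mappings_only = debug_pagealloc_enabled();	/* new flag */

	/* Always prints 1: the rename only inverts the flag's sense. */
	printf("%d\n", page_mappings_only == !allow_block_mappings);
	return 0;
}
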
@@ -332,7 +372,7 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end
 		__create_pgd_mapping(pgd, start, __phys_to_virt(start),
 				     end - start, PAGE_KERNEL,
 				     early_pgtable_alloc,
-				     !debug_pagealloc_enabled());
+				     debug_pagealloc_enabled());
 		return;
 	}
 
@@ -345,13 +385,13 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end
 				     __phys_to_virt(start),
 				     kernel_start - start, PAGE_KERNEL,
 				     early_pgtable_alloc,
-				     !debug_pagealloc_enabled());
+				     debug_pagealloc_enabled());
 	if (kernel_end < end)
 		__create_pgd_mapping(pgd, kernel_end,
 				     __phys_to_virt(kernel_end),
 				     end - kernel_end, PAGE_KERNEL,
 				     early_pgtable_alloc,
-				     !debug_pagealloc_enabled());
+				     debug_pagealloc_enabled());
 
 	/*
 	 * Map the linear alias of the [_text, __init_begin) interval as
@@ -361,7 +401,7 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end
 	 */
 	__create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start),
 			     kernel_end - kernel_start, PAGE_KERNEL_RO,
-			     early_pgtable_alloc, !debug_pagealloc_enabled());
+			     early_pgtable_alloc, debug_pagealloc_enabled());
 }
 
 static void __init map_mem(pgd_t *pgd)
@@ -396,6 +436,11 @@ void mark_rodata_ro(void)
 	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
 	create_mapping_late(__pa(__start_rodata), (unsigned long)__start_rodata,
 			    section_size, PAGE_KERNEL_RO);
+
+	/* flush the TLBs after updating live kernel mappings */
+	flush_tlb_all();
+
+	debug_checkwx();
 }
 
 static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
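
The PAGE_KERNEL_RO updates in mark_rodata_ro() are exactly the
permission-only changes that pgattr_change_is_safe() permits on live
entries, so no break-before-make sequence is needed; a single
flush_tlb_all() afterwards discards any stale writable translations.
debug_checkwx() (the reason for the new <asm/ptdump.h> include) then
walks the kernel page tables and warns if any mapping is left both
writable and executable. A sketch of the invariant it enforces, using
the assumed bit positions from the earlier sketch:

#include <stdbool.h>
#include <stdint.h>

/* Assumed arm64 descriptor bits, for illustration only. */
#define PTE_RDONLY	(1ULL << 7)	/* AP[2]: read-only */
#define PTE_PXN		(1ULL << 53)	/* privileged execute-never */

/* A descriptor must never be writable and executable at once. */
static bool pte_is_wx(uint64_t pte)
{
	return !(pte & PTE_RDONLY) && !(pte & PTE_PXN);
}
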
@@ -408,7 +453,7 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
 	BUG_ON(!PAGE_ALIGNED(size));
 
 	__create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
-			     early_pgtable_alloc, !debug_pagealloc_enabled());
+			     early_pgtable_alloc, debug_pagealloc_enabled());
 
 	vma->addr	= va_start;
 	vma->phys_addr	= pa_start;