@@ -30,6 +30,7 @@
 #include <linux/slab.h>
 #include <linux/stop_machine.h>
 
+#include <asm/barrier.h>
 #include <asm/cputype.h>
 #include <asm/fixmap.h>
 #include <asm/kernel-pgtable.h>
@@ -62,15 +63,18 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 }
 EXPORT_SYMBOL(phys_mem_access_prot);
 
-static void __init *early_alloc(unsigned long sz)
+static void __init *early_pgtable_alloc(void)
 {
 	phys_addr_t phys;
 	void *ptr;
 
-	phys = memblock_alloc(sz, sz);
+	phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 	BUG_ON(!phys);
 	ptr = __va(phys);
-	memset(ptr, 0, sz);
+	memset(ptr, 0, PAGE_SIZE);
+
+	/* Ensure the zeroed page is visible to the page table walker */
+	dsb(ishst);
 	return ptr;
 }
 
@@ -95,12 +99,12 @@ static void split_pmd(pmd_t *pmd, pte_t *pte)
 static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  pgprot_t prot,
-				  void *(*alloc)(unsigned long size))
+				  void *(*pgtable_alloc)(void))
 {
 	pte_t *pte;
 
 	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
-		pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
+		pte = pgtable_alloc();
 		if (pmd_sect(*pmd))
 			split_pmd(pmd, pte);
 		__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
@@ -130,7 +134,7 @@ static void split_pud(pud_t *old_pud, pmd_t *pmd)
 static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
				  unsigned long addr, unsigned long end,
				  phys_addr_t phys, pgprot_t prot,
-				  void *(*alloc)(unsigned long size))
+				  void *(*pgtable_alloc)(void))
 {
 	pmd_t *pmd;
 	unsigned long next;
@@ -139,7 +143,7 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
 	if (pud_none(*pud) || pud_sect(*pud)) {
-		pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
+		pmd = pgtable_alloc();
 		if (pud_sect(*pud)) {
 			/*
 			 * need to have the 1G of mappings continue to be
@@ -174,7 +178,7 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
 		}
 	} else {
 		alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
-			       prot, alloc);
+			       prot, pgtable_alloc);
 	}
 	phys += next - addr;
 	} while (pmd++, addr = next, addr != end);
@@ -195,13 +199,13 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next,
 static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
				  unsigned long addr, unsigned long end,
				  phys_addr_t phys, pgprot_t prot,
-				  void *(*alloc)(unsigned long size))
+				  void *(*pgtable_alloc)(void))
 {
 	pud_t *pud;
 	unsigned long next;
 
 	if (pgd_none(*pgd)) {
-		pud = alloc(PTRS_PER_PUD * sizeof(pud_t));
+		pud = pgtable_alloc();
 		pgd_populate(mm, pgd, pud);
 	}
 	BUG_ON(pgd_bad(*pgd));
@@ -234,7 +238,8 @@ static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
 			}
 		}
 	} else {
-		alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc);
+		alloc_init_pmd(mm, pud, addr, next, phys, prot,
+			       pgtable_alloc);
 	}
 	phys += next - addr;
 	} while (pud++, addr = next, addr != end);
@@ -247,7 +252,7 @@ static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
 static void __create_mapping(struct mm_struct *mm, pgd_t *pgd,
				    phys_addr_t phys, unsigned long virt,
				    phys_addr_t size, pgprot_t prot,
-				    void *(*alloc)(unsigned long size))
+				    void *(*pgtable_alloc)(void))
 {
 	unsigned long addr, length, end, next;
 
@@ -265,18 +270,18 @@ static void __create_mapping(struct mm_struct *mm, pgd_t *pgd,
 	end = addr + length;
 	do {
 		next = pgd_addr_end(addr, end);
-		alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc);
+		alloc_init_pud(mm, pgd, addr, next, phys, prot, pgtable_alloc);
 		phys += next - addr;
 	} while (pgd++, addr = next, addr != end);
 }
 
-static void *late_alloc(unsigned long size)
+static void *late_pgtable_alloc(void)
 {
-	void *ptr;
-
-	BUG_ON(size > PAGE_SIZE);
-	ptr = (void *)__get_free_page(PGALLOC_GFP);
+	void *ptr = (void *)__get_free_page(PGALLOC_GFP);
 	BUG_ON(!ptr);
+
+	/* Ensure the zeroed page is visible to the page table walker */
+	dsb(ishst);
 	return ptr;
 }
 
@@ -289,7 +294,7 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
 		return;
 	}
 	__create_mapping(&init_mm, pgd_offset_k(virt), phys, virt,
-			 size, prot, early_alloc);
+			 size, prot, early_pgtable_alloc);
 }
 
 void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
@@ -297,7 +302,7 @@ void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       pgprot_t prot)
 {
 	__create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
-			 late_alloc);
+			 late_pgtable_alloc);
 }
 
 static void create_mapping_late(phys_addr_t phys, unsigned long virt,
@@ -310,7 +315,7 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt,
 	}
 
 	return __create_mapping(&init_mm, pgd_offset_k(virt),
-				phys, virt, size, prot, late_alloc);
+				phys, virt, size, prot, late_pgtable_alloc);
 }
 
 #ifdef CONFIG_DEBUG_RODATA
@@ -460,15 +465,12 @@ void __init paging_init(void)
 	fixup_executable();
 
 	/* allocate the zero page. */
-	zero_page = early_alloc(PAGE_SIZE);
+	zero_page = early_pgtable_alloc();
 
 	bootmem_init();
 
 	empty_zero_page = virt_to_page(zero_page);
 
-	/* Ensure the zero page is visible to the page table walker */
-	dsb(ishst);
-
 	/*
 	 * TTBR0 is only used for the identity mapping at this stage. Make it
 	 * point to zero page to avoid speculatively fetching new entries.