@@ -328,22 +328,30 @@ void __init cleanup_highmap(void)
 	}
 }
 
+/*
+ * Create PTE level page table mapping for physical addresses.
+ * It returns the last physical address mapped.
+ */
 static unsigned long __meminit
-phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
+phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
 	      pgprot_t prot)
 {
-	unsigned long pages = 0, next;
-	unsigned long last_map_addr = end;
+	unsigned long pages = 0, paddr_next;
+	unsigned long paddr_last = paddr_end;
+	pte_t *pte;
 	int i;
 
-	pte_t *pte = pte_page + pte_index(addr);
+	pte = pte_page + pte_index(paddr);
+	i = pte_index(paddr);
 
-	for (i = pte_index(addr); i < PTRS_PER_PTE; i++, addr = next, pte++) {
-		next = (addr & PAGE_MASK) + PAGE_SIZE;
-		if (addr >= end) {
+	for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) {
+		paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
+		if (paddr >= paddr_end) {
 			if (!after_bootmem &&
-			    !e820_any_mapped(addr & PAGE_MASK, next, E820_RAM) &&
-			    !e820_any_mapped(addr & PAGE_MASK, next, E820_RESERVED_KERN))
+			    !e820_any_mapped(paddr & PAGE_MASK, paddr_next,
+					     E820_RAM) &&
+			    !e820_any_mapped(paddr & PAGE_MASK, paddr_next,
+					     E820_RESERVED_KERN))
 				set_pte(pte, __pte(0));
 			continue;
 		}
@@ -361,37 +369,44 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
 		}
 
 		if (0)
-			printk(" pte=%p addr=%lx pte=%016lx\n",
-			       pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
+			pr_info(" pte=%p addr=%lx pte=%016lx\n", pte, paddr,
+				pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
 		pages++;
-		set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
-		last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
+		set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
+		paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
 	}
 
 	update_page_count(PG_LEVEL_4K, pages);
 
-	return last_map_addr;
+	return paddr_last;
 }
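As a reading aid for the hunks above: the renamed loop still walks the requested range one 4 KiB page at a time and reports the end of the last page it touched. The following stand-alone user-space sketch models only that arithmetic; the MOCK_* constants and mock_* helpers are illustrative assumptions, not kernel code, and the e820 checks and set_pte() itself are omitted. Assumes a 64-bit build.

/* minimal model of the PTE-level walk in phys_pte_init() */
#include <stdio.h>

#define MOCK_PAGE_SHIFT		12UL
#define MOCK_PAGE_SIZE		(1UL << MOCK_PAGE_SHIFT)
#define MOCK_PAGE_MASK		(~(MOCK_PAGE_SIZE - 1))
#define MOCK_PTRS_PER_PTE	512UL

/* Index of the PTE slot covering a given physical address. */
static unsigned long mock_pte_index(unsigned long paddr)
{
	return (paddr >> MOCK_PAGE_SHIFT) & (MOCK_PTRS_PER_PTE - 1);
}

/* Walk [paddr, paddr_end) one 4 KiB page at a time. */
static unsigned long mock_phys_pte_walk(unsigned long paddr,
					unsigned long paddr_end)
{
	unsigned long paddr_next = 0, paddr_last = paddr_end;
	unsigned long i = mock_pte_index(paddr);

	for (; i < MOCK_PTRS_PER_PTE; i++, paddr = paddr_next) {
		paddr_next = (paddr & MOCK_PAGE_MASK) + MOCK_PAGE_SIZE;
		if (paddr >= paddr_end)
			continue;	/* past the requested range */
		/* the real code would set_pte() here */
		paddr_last = (paddr & MOCK_PAGE_MASK) + MOCK_PAGE_SIZE;
	}
	return paddr_last;
}

int main(void)
{
	/* Mapping 0x1000..0x5800 touches pages up to 0x6000. */
	printf("last mapped paddr = %#lx\n",
	       mock_phys_pte_walk(0x1000, 0x5800));
	return 0;
}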
 
+/*
+ * Create PMD level page table mapping for physical addresses. The virtual
+ * and physical address have to be aligned at this level.
+ * It returns the last physical address mapped.
+ */
 static unsigned long __meminit
-phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
+phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
 	      unsigned long page_size_mask, pgprot_t prot)
 {
-	unsigned long pages = 0, next;
-	unsigned long last_map_addr = end;
+	unsigned long pages = 0, paddr_next;
+	unsigned long paddr_last = paddr_end;
 
-	int i = pmd_index(address);
+	int i = pmd_index(paddr);
 
-	for (; i < PTRS_PER_PMD; i++, address = next) {
-		pmd_t *pmd = pmd_page + pmd_index(address);
+	for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) {
+		pmd_t *pmd = pmd_page + pmd_index(paddr);
 		pte_t *pte;
 		pgprot_t new_prot = prot;
 
-		next = (address & PMD_MASK) + PMD_SIZE;
-		if (address >= end) {
+		paddr_next = (paddr & PMD_MASK) + PMD_SIZE;
+		if (paddr >= paddr_end) {
 			if (!after_bootmem &&
-			    !e820_any_mapped(address & PMD_MASK, next, E820_RAM) &&
-			    !e820_any_mapped(address & PMD_MASK, next, E820_RESERVED_KERN))
+			    !e820_any_mapped(paddr & PMD_MASK, paddr_next,
+					     E820_RAM) &&
+			    !e820_any_mapped(paddr & PMD_MASK, paddr_next,
+					     E820_RESERVED_KERN))
 				set_pmd(pmd, __pmd(0));
 			continue;
 		}
@@ -400,8 +415,8 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 		if (!pmd_large(*pmd)) {
 			spin_lock(&init_mm.page_table_lock);
 			pte = (pte_t *)pmd_page_vaddr(*pmd);
-			last_map_addr = phys_pte_init(pte, address,
-						      end, prot);
+			paddr_last = phys_pte_init(pte, paddr,
+						   paddr_end, prot);
 			spin_unlock(&init_mm.page_table_lock);
 			continue;
 		}
@@ -420,7 +435,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 		if (page_size_mask & (1 << PG_LEVEL_2M)) {
 			if (!after_bootmem)
 				pages++;
-			last_map_addr = next;
+			paddr_last = paddr_next;
 			continue;
 		}
 		new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
@@ -430,42 +445,54 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 			pages++;
 			spin_lock(&init_mm.page_table_lock);
 			set_pte((pte_t *)pmd,
-				pfn_pte((address & PMD_MASK) >> PAGE_SHIFT,
+				pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
 					__pgprot(pgprot_val(prot) | _PAGE_PSE)));
 			spin_unlock(&init_mm.page_table_lock);
-			last_map_addr = next;
+			paddr_last = paddr_next;
 			continue;
 		}
 
 		pte = alloc_low_page();
-		last_map_addr = phys_pte_init(pte, address, end, new_prot);
+		paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot);
 
 		spin_lock(&init_mm.page_table_lock);
 		pmd_populate_kernel(&init_mm, pmd, pte);
 		spin_unlock(&init_mm.page_table_lock);
 	}
 	update_page_count(PG_LEVEL_2M, pages);
-	return last_map_addr;
+	return paddr_last;
 }
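The PMD-level walk above strides through the range in 2 MiB steps. The short user-space sketch below models that stepping; note that in the kernel, whether a stride becomes a large page or is left to the 4 KiB PTE level is driven by page_size_mask as set up by the callers, so the classification printed here is only an illustration. MOCK_* names are made up. Assumes a 64-bit build.

/* rough model of the 2 MiB stepping in phys_pmd_init() */
#include <stdio.h>

#define MOCK_PMD_SHIFT	21UL
#define MOCK_PMD_SIZE	(1UL << MOCK_PMD_SHIFT)
#define MOCK_PMD_MASK	(~(MOCK_PMD_SIZE - 1))

int main(void)
{
	unsigned long paddr = 0x0;		/* 2 MiB aligned start */
	unsigned long paddr_end = 0x500000;	/* 5 MiB end: 2.5 strides */
	unsigned long paddr_next;

	for (; paddr < paddr_end; paddr = paddr_next) {
		paddr_next = (paddr & MOCK_PMD_MASK) + MOCK_PMD_SIZE;
		if (paddr_next <= paddr_end)
			printf("2M stride %#lx..%#lx\n", paddr, paddr_next);
		else
			printf("tail      %#lx..%#lx (finer-grained mapping)\n",
			       paddr, paddr_end);
	}
	return 0;
}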
 
+/*
+ * Create PUD level page table mapping for physical addresses. The virtual
+ * and physical address do not have to be aligned at this level. KASLR can
+ * randomize virtual addresses up to this level.
+ * It returns the last physical address mapped.
+ */
 static unsigned long __meminit
-phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
-			 unsigned long page_size_mask)
+phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
+	      unsigned long page_size_mask)
 {
-	unsigned long pages = 0, next;
-	unsigned long last_map_addr = end;
-	int i = pud_index(addr);
+	unsigned long pages = 0, paddr_next;
+	unsigned long paddr_last = paddr_end;
+	unsigned long vaddr = (unsigned long)__va(paddr);
+	int i = pud_index(vaddr);
 
-	for (; i < PTRS_PER_PUD; i++, addr = next) {
-		pud_t *pud = pud_page + pud_index(addr);
+	for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
+		pud_t *pud;
 		pmd_t *pmd;
 		pgprot_t prot = PAGE_KERNEL;
 
-		next = (addr & PUD_MASK) + PUD_SIZE;
-		if (addr >= end) {
+		vaddr = (unsigned long)__va(paddr);
+		pud = pud_page + pud_index(vaddr);
+		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;
+
+		if (paddr >= paddr_end) {
 			if (!after_bootmem &&
-			    !e820_any_mapped(addr & PUD_MASK, next, E820_RAM) &&
-			    !e820_any_mapped(addr & PUD_MASK, next, E820_RESERVED_KERN))
+			    !e820_any_mapped(paddr & PUD_MASK, paddr_next,
+					     E820_RAM) &&
+			    !e820_any_mapped(paddr & PUD_MASK, paddr_next,
+					     E820_RESERVED_KERN))
 				set_pud(pud, __pud(0));
 			continue;
 		}
@@ -473,8 +500,10 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 		if (!pud_none(*pud)) {
 			if (!pud_large(*pud)) {
 				pmd = pmd_offset(pud, 0);
-				last_map_addr = phys_pmd_init(pmd, addr, end,
-							      page_size_mask, prot);
+				paddr_last = phys_pmd_init(pmd, paddr,
+							   paddr_end,
+							   page_size_mask,
+							   prot);
 				__flush_tlb_all();
 				continue;
 			}
@@ -493,7 +522,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 			if (page_size_mask & (1 << PG_LEVEL_1G)) {
 				if (!after_bootmem)
 					pages++;
-				last_map_addr = next;
+				paddr_last = paddr_next;
 				continue;
 			}
 			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
@@ -503,16 +532,16 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 			pages++;
 			spin_lock(&init_mm.page_table_lock);
 			set_pte((pte_t *)pud,
-				pfn_pte((addr & PUD_MASK) >> PAGE_SHIFT,
+				pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
 					PAGE_KERNEL_LARGE));
 			spin_unlock(&init_mm.page_table_lock);
-			last_map_addr = next;
+			paddr_last = paddr_next;
 			continue;
 		}
 
 		pmd = alloc_low_page();
-		last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
-					      prot);
+		paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
+					   page_size_mask, prot);
 
 		spin_lock(&init_mm.page_table_lock);
 		pud_populate(&init_mm, pud, pmd);
@@ -522,38 +551,44 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 
 	update_page_count(PG_LEVEL_1G, pages);
 
-	return last_map_addr;
+	return paddr_last;
 }
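The functional change in this function is that the PUD slot is now chosen by pud_index() of the virtual address obtained via __va(), because a KASLR-randomized direct-mapping base can make the virtual and physical PUD indices disagree. The user-space sketch below shows that effect; all macros and values are simplified stand-ins (the base and the 5 GiB offset are made-up examples), not the kernel's. Assumes a 64-bit build.

/* why pud_index() is now taken from the virtual address */
#include <stdio.h>

#define MOCK_PUD_SHIFT		30UL			/* 1 GiB per PUD entry */
#define MOCK_PTRS_PER_PUD	512UL
#define MOCK_PAGE_OFFSET	0xffff888000000000UL	/* example direct-map base */
#define MOCK_KASLR_OFFSET	(5UL << 30)		/* pretend 5 GiB shift */

static unsigned long mock_va(unsigned long paddr)
{
	return MOCK_PAGE_OFFSET + MOCK_KASLR_OFFSET + paddr;
}

static unsigned long mock_pud_index(unsigned long addr)
{
	return (addr >> MOCK_PUD_SHIFT) & (MOCK_PTRS_PER_PUD - 1);
}

int main(void)
{
	unsigned long paddr = 2UL << 30;	/* physical 2 GiB */

	/* With the offset applied, the two indices differ (2 vs 7 here). */
	printf("pud_index(paddr)       = %lu\n", mock_pud_index(paddr));
	printf("pud_index(__va(paddr)) = %lu\n", mock_pud_index(mock_va(paddr)));
	return 0;
}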
 
+/*
+ * Create page table mapping for the physical memory for specific physical
+ * addresses. The virtual and physical addresses have to be aligned on PMD level
+ * down. It returns the last physical address mapped.
+ */
 unsigned long __meminit
-kernel_physical_mapping_init(unsigned long start,
-			     unsigned long end,
+kernel_physical_mapping_init(unsigned long paddr_start,
+			     unsigned long paddr_end,
 			     unsigned long page_size_mask)
 {
 	bool pgd_changed = false;
-	unsigned long next, last_map_addr = end;
-	unsigned long addr;
+	unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;
 
-	start = (unsigned long)__va(start);
-	end = (unsigned long)__va(end);
-	addr = start;
+	paddr_last = paddr_end;
+	vaddr = (unsigned long)__va(paddr_start);
+	vaddr_end = (unsigned long)__va(paddr_end);
+	vaddr_start = vaddr;
 
-	for (; start < end; start = next) {
-		pgd_t *pgd = pgd_offset_k(start);
+	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
+		pgd_t *pgd = pgd_offset_k(vaddr);
 		pud_t *pud;
 
-		next = (start & PGDIR_MASK) + PGDIR_SIZE;
+		vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;
 
 		if (pgd_val(*pgd)) {
 			pud = (pud_t *)pgd_page_vaddr(*pgd);
-			last_map_addr = phys_pud_init(pud, __pa(start),
-						      __pa(end), page_size_mask);
+			paddr_last = phys_pud_init(pud, __pa(vaddr),
+						   __pa(vaddr_end),
+						   page_size_mask);
 			continue;
 		}
 
 		pud = alloc_low_page();
-		last_map_addr = phys_pud_init(pud, __pa(start), __pa(end),
-					      page_size_mask);
+		paddr_last = phys_pud_init(pud, __pa(vaddr), __pa(vaddr_end),
+					   page_size_mask);
 
 		spin_lock(&init_mm.page_table_lock);
 		pgd_populate(&init_mm, pgd, pud);
@@ -562,11 +597,11 @@ kernel_physical_mapping_init(unsigned long start,
 	}
 
 	if (pgd_changed)
-		sync_global_pgds(addr, end - 1, 0);
+		sync_global_pgds(vaddr_start, vaddr_end - 1, 0);
 
 	__flush_tlb_all();
 
-	return last_map_addr;
+	return paddr_last;
 }
 
#ifndef CONFIG_NUMA
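Putting the pieces together: the reworked kernel_physical_mapping_init() converts the physical range to a virtual range once, steps in PGDIR_SIZE strides of virtual address, and hands each slice back down as a physical range. The sketch below models that top-level control flow in user space; the mock __va()/__pa() helpers, the constants, and the 1.5 TiB example range are illustrative assumptions, not the kernel implementation. Assumes a 64-bit build.

/* rough model of the top-level PGD walk */
#include <stdio.h>

#define MOCK_PGDIR_SHIFT	39UL			/* 512 GiB per PGD entry */
#define MOCK_PGDIR_SIZE		(1UL << MOCK_PGDIR_SHIFT)
#define MOCK_PGDIR_MASK		(~(MOCK_PGDIR_SIZE - 1))
#define MOCK_PAGE_OFFSET	0xffff888000000000UL

static unsigned long mock_va(unsigned long paddr) { return MOCK_PAGE_OFFSET + paddr; }
static unsigned long mock_pa(unsigned long vaddr) { return vaddr - MOCK_PAGE_OFFSET; }

int main(void)
{
	unsigned long paddr_start = 0x0;
	unsigned long paddr_end = 0x18000000000UL;	/* 1.5 TiB of RAM */
	unsigned long vaddr = mock_va(paddr_start);
	unsigned long vaddr_end = mock_va(paddr_end);
	unsigned long vaddr_next;

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		vaddr_next = (vaddr & MOCK_PGDIR_MASK) + MOCK_PGDIR_SIZE;
		printf("PGD slice: virt %#lx -> phys %#lx..%#lx\n",
		       vaddr, mock_pa(vaddr),
		       mock_pa(vaddr_next < vaddr_end ? vaddr_next : vaddr_end));
	}
	return 0;
}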