@@ -417,11 +417,11 @@ void vmalloc_sync_all(void)
  */
 static noinline int vmalloc_fault(unsigned long address)
 {
-	pgd_t *pgd, *pgd_ref;
-	p4d_t *p4d, *p4d_ref;
-	pud_t *pud, *pud_ref;
-	pmd_t *pmd, *pmd_ref;
-	pte_t *pte, *pte_ref;
+	pgd_t *pgd, *pgd_k;
+	p4d_t *p4d, *p4d_k;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
 
 	/* Make sure we are in vmalloc area: */
 	if (!(address >= VMALLOC_START && address < VMALLOC_END))
@@ -435,73 +435,51 @@ static noinline int vmalloc_fault(unsigned long address)
 	 * case just flush:
 	 */
 	pgd = (pgd_t *)__va(read_cr3_pa()) + pgd_index(address);
-	pgd_ref = pgd_offset_k(address);
-	if (pgd_none(*pgd_ref))
+	pgd_k = pgd_offset_k(address);
+	if (pgd_none(*pgd_k))
 		return -1;
 
 	if (pgtable_l5_enabled) {
 		if (pgd_none(*pgd)) {
-			set_pgd(pgd, *pgd_ref);
+			set_pgd(pgd, *pgd_k);
 			arch_flush_lazy_mmu_mode();
 		} else {
-			BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+			BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_k));
 		}
 	}
 
 	/* With 4-level paging, copying happens on the p4d level. */
 	p4d = p4d_offset(pgd, address);
-	p4d_ref = p4d_offset(pgd_ref, address);
-	if (p4d_none(*p4d_ref))
+	p4d_k = p4d_offset(pgd_k, address);
+	if (p4d_none(*p4d_k))
 		return -1;
 
 	if (p4d_none(*p4d) && !pgtable_l5_enabled) {
-		set_p4d(p4d, *p4d_ref);
+		set_p4d(p4d, *p4d_k);
 		arch_flush_lazy_mmu_mode();
 	} else {
-		BUG_ON(p4d_pfn(*p4d) != p4d_pfn(*p4d_ref));
+		BUG_ON(p4d_pfn(*p4d) != p4d_pfn(*p4d_k));
 	}
 
-	/*
-	 * Below here mismatches are bugs because these lower tables
-	 * are shared:
-	 */
 	BUILD_BUG_ON(CONFIG_PGTABLE_LEVELS < 4);
 
 	pud = pud_offset(p4d, address);
-	pud_ref = pud_offset(p4d_ref, address);
-	if (pud_none(*pud_ref))
+	if (pud_none(*pud))
 		return -1;
 
-	if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
-		BUG();
-
 	if (pud_large(*pud))
 		return 0;
 
 	pmd = pmd_offset(pud, address);
-	pmd_ref = pmd_offset(pud_ref, address);
-	if (pmd_none(*pmd_ref))
+	if (pmd_none(*pmd))
 		return -1;
 
-	if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
-		BUG();
-
	if (pmd_large(*pmd))
 		return 0;
 
-	pte_ref = pte_offset_kernel(pmd_ref, address);
-	if (!pte_present(*pte_ref))
-		return -1;
-
 	pte = pte_offset_kernel(pmd, address);
-
-	/*
-	 * Don't use pte_page here, because the mappings can point
-	 * outside mem_map, and the NUMA hash lookup cannot handle
-	 * that:
-	 */
-	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
-		BUG();
+	if (!pte_present(*pte))
+		return -1;
 
 	return 0;
 }