@@ -692,6 +692,18 @@ static bool try_to_free_pmd_page(pmd_t *pmd)
 	return true;
 }
 
+static bool try_to_free_pud_page(pud_t *pud)
+{
+	int i;
+
+	for (i = 0; i < PTRS_PER_PUD; i++)
+		if (!pud_none(pud[i]))
+			return false;
+
+	free_page((unsigned long)pud);
+	return true;
+}
+
 static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
 {
 	pte_t *pte = pte_offset_kernel(pmd, start);
@@ -805,6 +817,16 @@ static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end)
 	 */
 }
 
+static void unmap_pgd_range(pgd_t *root, unsigned long addr, unsigned long end)
+{
+	pgd_t *pgd_entry = root + pgd_index(addr);
+
+	unmap_pud_range(pgd_entry, addr, end);
+
+	if (try_to_free_pud_page((pud_t *)pgd_page_vaddr(*pgd_entry)))
+		pgd_clear(pgd_entry);
+}
+
 static int alloc_pte_page(pmd_t *pmd)
 {
 	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
@@ -999,9 +1021,8 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
 static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
 {
 	pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
-	bool allocd_pgd = false;
-	pgd_t *pgd_entry;
 	pud_t *pud = NULL;	/* shut up gcc */
+	pgd_t *pgd_entry;
 	int ret;
 
 	pgd_entry = cpa->pgd + pgd_index(addr);
@@ -1015,7 +1036,6 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
 			return -1;
 
 		set_pgd(pgd_entry, __pgd(__pa(pud) | _KERNPG_TABLE));
-		allocd_pgd = true;
 	}
 
 	pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr);
@@ -1023,19 +1043,11 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
 
 	ret = populate_pud(cpa, addr, pgd_entry, pgprot);
 	if (ret < 0) {
-		unmap_pud_range(pgd_entry, addr,
+		unmap_pgd_range(cpa->pgd, addr,
 				addr + (cpa->numpages << PAGE_SHIFT));
-
-		if (allocd_pgd) {
-			/*
-			 * If I allocated this PUD page, I can just as well
-			 * free it in this error path.
-			 */
-			pgd_clear(pgd_entry);
-			free_page((unsigned long)pud);
-		}
 		return ret;
 	}
+
 	cpa->numpages = ret;
 	return 0;
 }
@@ -1861,6 +1873,12 @@ out:
 	return retval;
 }
 
+void kernel_unmap_pages_in_pgd(pgd_t *root, unsigned long address,
+			       unsigned numpages)
+{
+	unmap_pgd_range(root, address, address + (numpages << PAGE_SHIFT));
+}
+
 /*
  * The testcases use internal knowledge of the implementation that shouldn't
  * be exposed to the rest of the kernel. Include these directly here.