@@ -719,28 +719,50 @@ int pmd_clear_huge(pmd_t *pmd)
 	return 0;
 }
 
+#ifdef CONFIG_X86_64
 /**
  * pud_free_pmd_page - Clear pud entry and free pmd page.
  * @pud: Pointer to a PUD.
+ * @addr: Virtual address associated with pud.
  *
- * Context: The pud range has been unmaped and TLB purged.
+ * Context: The pud range has been unmapped and TLB purged.
  * Return: 1 if clearing the entry succeeded. 0 otherwise.
+ *
+ * NOTE: Callers must allow a single page allocation.
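+ * (The page temporarily saves the pmd entries while the paging-structure
+ * caches are flushed.)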
  */
-int pud_free_pmd_page(pud_t *pud)
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
 {
-	pmd_t *pmd;
+	pmd_t *pmd, *pmd_sv;
+	pte_t *pte;
 	int i;
 
 	if (pud_none(*pud))
 		return 1;
 
 	pmd = (pmd_t *)pud_page_vaddr(*pud);
+	pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
+	if (!pmd_sv)
+		return 0;
 
-	for (i = 0; i < PTRS_PER_PMD; i++)
-		if (!pmd_free_pte_page(&pmd[i]))
-			return 0;
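+	/*
+	 * Save the pmd entries and then clear them; the pte pages they
+	 * point to can be freed only after the paging-structure caches
+	 * are flushed below.
+	 */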
+	for (i = 0; i < PTRS_PER_PMD; i++) {
+		pmd_sv[i] = pmd[i];
+		if (!pmd_none(pmd[i]))
+			pmd_clear(&pmd[i]);
+	}
 
 	pud_clear(pud);
+
+	/* INVLPG to clear all paging-structure caches */
+	flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
+
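+	/* With the caches flushed, the saved pte pages can be freed. */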
+	for (i = 0; i < PTRS_PER_PMD; i++) {
+		if (!pmd_none(pmd_sv[i])) {
+			pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
+			free_page((unsigned long)pte);
+		}
+	}
+
+	free_page((unsigned long)pmd_sv);
 	free_page((unsigned long)pmd);
 
 	return 1;
@@ -749,11 +771,12 @@ int pud_free_pmd_page(pud_t *pud)
 /**
  * pmd_free_pte_page - Clear pmd entry and free pte page.
  * @pmd: Pointer to a PMD.
+ * @addr: Virtual address associated with pmd.
  *
- * Context: The pmd range has been unmaped and TLB purged.
+ * Context: The pmd range has been unmapped and TLB purged.
  * Return: 1 if clearing the entry succeeded. 0 otherwise.
  */
-int pmd_free_pte_page(pmd_t *pmd)
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 {
 	pte_t *pte;
 
@@ -762,8 +785,30 @@ int pmd_free_pte_page(pmd_t *pmd)
 
 	pte = (pte_t *)pmd_page_vaddr(*pmd);
 	pmd_clear(pmd);
+
+	/* INVLPG to clear all paging-structure caches */
+	flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
+
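+	/* The pte page can be freed only after the flush above. */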
 	free_page((unsigned long)pte);
 
 	return 1;
 }
+
+#else /* !CONFIG_X86_64 */
+
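+/*
+ * Never free a pmd page on x86-PAE; only report whether the pud entry
+ * was already clear.
+ */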
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+{
+	return pud_none(*pud);
+}
+
+/*
+ * Disable free page handling on x86-PAE. This ensures that ioremap()
+ * does not update sync'd pmd entries. See vmalloc_sync_one().
+ */
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+{
+	return pmd_none(*pmd);
+}
+
+#endif /* CONFIG_X86_64 */
 #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
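
For context, a minimal caller sketch (hypothetical helper name; the in-tree
callers are the huge-mapping paths in lib/ioremap.c): a caller first unmaps
the range and purges the TLB, then asks pmd_free_pte_page() to clear the
slot before installing a huge mapping with pmd_set_huge().

	/*
	 * Sketch only: install a 2M mapping over a pmd slot that may
	 * still hold a pte page. Assumes the VA range was already
	 * unmapped and the TLB purged, per the Context above.
	 */
	static int example_set_huge_pmd(pmd_t *pmd, unsigned long addr,
					phys_addr_t phys, pgprot_t prot)
	{
		if (!pmd_free_pte_page(pmd, addr))
			return 0;	/* fall back to 4K mappings */
		return pmd_set_huge(pmd, phys, prot);
	}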