@@ -261,11 +261,12 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 
 	pmd = pmd_offset(pud, address);
 	pmd_k = pmd_offset(pud_k, address);
-	if (!pmd_present(*pmd_k))
-		return NULL;
 
-	if (!pmd_present(*pmd))
+	if (pmd_present(*pmd) != pmd_present(*pmd_k))
 		set_pmd(pmd, *pmd_k);
+
+	if (!pmd_present(*pmd_k))
+		return NULL;
 	else
 		BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
 
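The first hunk changes vmalloc_sync_one() so that it no longer returns before the local PMD has been updated: the present bits of the local and reference entries are compared, and the reference state is copied over whenever they differ. That propagates unmappings (reference entry cleared, local entry still present) as well as newly established mappings. As a reading aid, here is the fragment with the hunk applied, reconstructed from the diff rather than quoted from the tree, with explanatory comments added:

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);

	/* Sync in both directions: also clear the local entry when the
	 * reference entry has gone away, not only copy new mappings in.
	 */
	if (pmd_present(*pmd) != pmd_present(*pmd_k))
		set_pmd(pmd, *pmd_k);

	/* NULL still means "no mapping at this address", but it is now
	 * returned only after the local entry has been brought in sync.
	 */
	if (!pmd_present(*pmd_k))
		return NULL;
	else
		BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
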
@@ -287,17 +288,13 @@ void vmalloc_sync_all(void)
 		spin_lock(&pgd_lock);
 		list_for_each_entry(page, &pgd_list, lru) {
 			spinlock_t *pgt_lock;
-			pmd_t *ret;
 
 			/* the pgt_lock only for Xen */
 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 
 			spin_lock(pgt_lock);
-			ret = vmalloc_sync_one(page_address(page), address);
+			vmalloc_sync_one(page_address(page), address);
 			spin_unlock(pgt_lock);
-
-			if (!ret)
-				break;
 		}
 		spin_unlock(&pgd_lock);
 	}
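The second hunk is the caller-side consequence. A NULL return from vmalloc_sync_one() used to mean "nothing is mapped at this address", so the walk over pgd_list could stop early: if the reference page table had no entry, no other page table needed one either. Since NULL can now also mean "an unmapping was just propagated", breaking out of the loop would leave stale mappings in the page tables not yet visited, so the return value is ignored and every PGD on the list is synchronized.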