@@ -910,11 +910,14 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
         VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
 
         old_pmd = *pmd;
-        kvm_set_pmd(pmd, *new_pmd);
-        if (pmd_present(old_pmd))
+        if (pmd_present(old_pmd)) {
+                pmd_clear(pmd);
                 kvm_tlb_flush_vmid_ipa(kvm, addr);
-        else
+        } else {
                 get_page(virt_to_page(pmd));
+        }
+
+        kvm_set_pmd(pmd, *new_pmd);
         return 0;
 }
 
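For readability, this is roughly how the tail of stage2_set_pmd_huge() reads once the hunk above is applied. It is only a sketch reconstructed from the diff; the earlier part of the function is not shown, and the comments are added here rather than taken from the patch.

        old_pmd = *pmd;
        if (pmd_present(old_pmd)) {
                /*
                 * Replacing a live huge mapping: clear the old PMD and
                 * invalidate the TLB for this IPA before the new entry is
                 * written, so the old and new translations are never
                 * visible at the same time (break-before-make ordering).
                 */
                pmd_clear(pmd);
                kvm_tlb_flush_vmid_ipa(kvm, addr);
        } else {
                /* First entry installed here: take a reference on the table page. */
                get_page(virt_to_page(pmd));
        }

        kvm_set_pmd(pmd, *new_pmd);
        return 0;
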
@@ -963,12 +966,14 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
 
         /* Create 2nd stage page table mapping - Level 3 */
         old_pte = *pte;
-        kvm_set_pte(pte, *new_pte);
-        if (pte_present(old_pte))
+        if (pte_present(old_pte)) {
+                kvm_set_pte(pte, __pte(0));
                 kvm_tlb_flush_vmid_ipa(kvm, addr);
-        else
+        } else {
                 get_page(virt_to_page(pte));
+        }
 
+        kvm_set_pte(pte, *new_pte);
         return 0;
 }
 
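The PTE-level hunk gives stage2_set_pte() the same shape. Again a sketch reconstructed from the diff alone, with the rest of the function omitted and the comments added here:

        /* Create 2nd stage page table mapping - Level 3 */
        old_pte = *pte;
        if (pte_present(old_pte)) {
                /*
                 * Zap the old PTE by writing an all-zero entry, flush the
                 * TLB for this IPA, and only then install the new PTE.
                 */
                kvm_set_pte(pte, __pte(0));
                kvm_tlb_flush_vmid_ipa(kvm, addr);
        } else {
                /* New entry in this table: take a reference on the table page. */
                get_page(virt_to_page(pte));
        }

        kvm_set_pte(pte, *new_pte);
        return 0;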