@@ -218,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
 			kvm_tlb_flush_vmid_ipa(kvm, addr);
 
 			/* No need to invalidate the cache for device mappings */
-			if (!kvm_is_device_pfn(__phys_to_pfn(addr)))
+			if (!kvm_is_device_pfn(pte_pfn(old_pte)))
 				kvm_flush_dcache_pte(old_pte);
 
 			put_page(virt_to_page(pte));
@@ -310,7 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		if (!pte_none(*pte) && !kvm_is_device_pfn(__phys_to_pfn(addr)))
+		if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
 			kvm_flush_dcache_pte(*pte);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
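
Both hunks correct the same mistake: in unmap_ptes() and stage2_flush_ptes(), addr walks the guest's intermediate physical address (IPA) space, so __phys_to_pfn(addr) yields a guest frame number, not the host PFN that actually backs the mapping. The device-mapping check has to be made on the PFN recorded in the PTE itself, which is what pte_pfn(old_pte) and pte_pfn(*pte) provide. As a rough sketch of what the helper is assumed to do (the authoritative definition lives elsewhere in mmu.c and may differ):

/*
 * Sketch only, not the definition from this patch: kvm_is_device_pfn()
 * is assumed to treat any host PFN without a valid memmap entry, i.e.
 * anything that is not ordinary RAM, as a device mapping, which the
 * D-cache flush above must skip.
 */
static bool kvm_is_device_pfn(unsigned long pfn)
{
	return !pfn_valid(pfn);
}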