@@ -957,6 +957,12 @@ static bool kvm_is_device_pfn(unsigned long pfn)
 	return !pfn_valid(pfn);
 }
 
+static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+				      unsigned long size, bool uncached)
+{
+	__coherent_cache_guest_page(vcpu, pfn, size, uncached);
+}
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			  struct kvm_memory_slot *memslot, unsigned long hva,
 			  unsigned long fault_status)
@@ -1046,8 +1052,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			kvm_set_s2pmd_writable(&new_pmd);
 			kvm_set_pfn_dirty(pfn);
 		}
-		coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE,
-					  fault_ipa_uncached);
+		coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached);
 		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
 	} else {
 		pte_t new_pte = pfn_pte(pfn, mem_type);
@@ -1055,8 +1060,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			kvm_set_s2pte_writable(&new_pte);
 			kvm_set_pfn_dirty(pfn);
 		}
-		coherent_cache_guest_page(vcpu, hva, PAGE_SIZE,
-					  fault_ipa_uncached);
+		coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached);
 		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
 			pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
 	}
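
The new coherent_cache_guest_page() wrapper simply forwards to the
per-architecture __coherent_cache_guest_page(). The visible change is at the
two call sites in user_mem_abort(): cache maintenance is now keyed by the
page's pfn rather than by the userspace hva. Presumably this lets the
architecture code obtain its own kernel mapping of the page instead of relying
on the userspace mapping. As a rough sketch of the shape that can take (this
is illustrative only, not the in-tree implementation; kvm_flush_dcache_area()
here is a hypothetical stand-in for the architecture's d-cache clean primitive):

	/*
	 * Illustrative sketch only -- not the kernel's actual code.
	 * Walks a pfn range one page at a time through a temporary
	 * kernel mapping, so no userspace hva is needed.
	 */
	static void example_coherent_cache_pfn_range(pfn_t pfn,
						     unsigned long size)
	{
		while (size) {
			/* Map the physical page into the kernel. */
			void *va = kmap_atomic_pfn(pfn);

			kvm_flush_dcache_area(va, PAGE_SIZE); /* assumed helper */

			kunmap_atomic(va);
			pfn++;
			size -= PAGE_SIZE;
		}
	}

One consequence of passing the pfn is that the PMD_SIZE case no longer needs
the "hva & PMD_MASK" alignment dance: the pfn already identifies the start of
the huge mapping being installed.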