@@ -746,6 +746,14 @@ static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap)
 	return false;
 }
 
+static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
+{
+	if (kvm_vcpu_trap_is_iabt(vcpu))
+		return false;
+
+	return kvm_vcpu_dabt_iswrite(vcpu);
+}
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			  struct kvm_memory_slot *memslot, unsigned long hva,
 			  unsigned long fault_status)
@@ -760,7 +768,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	pfn_t pfn;
 	pgprot_t mem_type = PAGE_S2;
 
-	write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
+	write_fault = kvm_is_write_fault(vcpu);
 	if (fault_status == FSC_PERM && !write_fault) {
 		kvm_err("Unexpected L2 read permission error\n");
 		return -EFAULT;
@@ -886,7 +894,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	gfn = fault_ipa >> PAGE_SHIFT;
 	memslot = gfn_to_memslot(vcpu->kvm, gfn);
 	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
-	write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
+	write_fault = kvm_is_write_fault(vcpu);
 	if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
 		if (is_iabt) {
 			/* Prefetch Abort on I/O address */