@@ -4085,7 +4085,8 @@ static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
 
 		if (gpa == UNMAPPED_GVA)
 			return X86EMUL_PROPAGATE_FAULT;
-		ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
+		ret = kvm_read_guest_page(vcpu->kvm, gpa >> PAGE_SHIFT, data,
+					  offset, toread);
 		if (ret < 0) {
 			r = X86EMUL_IO_NEEDED;
 			goto out;
@@ -4106,10 +4107,24 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
 {
 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+	unsigned offset;
+	int ret;
 
-	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
-					  access | PFERR_FETCH_MASK,
-					  exception);
+	/* Inline kvm_read_guest_virt_helper for speed. */
+	gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK,
+						    exception);
+	if (unlikely(gpa == UNMAPPED_GVA))
+		return X86EMUL_PROPAGATE_FAULT;
+
+	offset = addr & (PAGE_SIZE-1);
+	if (WARN_ON(offset + bytes > PAGE_SIZE))
+		bytes = (unsigned)PAGE_SIZE - offset;
+	ret = kvm_read_guest_page(vcpu->kvm, gpa >> PAGE_SHIFT, val,
+				  offset, bytes);
+	if (unlikely(ret < 0))
+		return X86EMUL_IO_NEEDED;
+
+	return X86EMUL_CONTINUE;
 }
 
 int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
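
The single-page assumption in the inlined fetch path comes down to the offset arithmetic visible above: addr & (PAGE_SIZE-1) is the offset within the guest page, and a read of "bytes" starting there stays on one page only if offset + bytes <= PAGE_SIZE, which is exactly the clamp applied under WARN_ON(). The following is a minimal userspace sketch of that arithmetic, not kernel code; PAGE_SIZE is assumed to be 4096 and the helper name bytes_on_first_page is made up for illustration.

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed 4 KiB pages, as on x86 */

/*
 * Hypothetical helper: given a guest virtual address and a requested
 * length, return how many bytes can be read without crossing into the
 * next page -- the same clamp the patch applies via WARN_ON().
 */
static unsigned long bytes_on_first_page(unsigned long addr, unsigned long bytes)
{
	unsigned long offset = addr & (PAGE_SIZE - 1);	/* offset within the page */
	unsigned long room   = PAGE_SIZE - offset;	/* bytes left on this page */

	return bytes <= room ? bytes : room;
}

int main(void)
{
	/* A 15-byte read starting 10 bytes before a page boundary spans two pages. */
	unsigned long addr = 0x7ffff7a3bff6UL;
	unsigned long len  = 15;

	printf("offset in page: %lu\n", addr & (PAGE_SIZE - 1));
	printf("readable from first page: %lu of %lu bytes\n",
	       bytes_on_first_page(addr, len), len);
	return 0;
}

In the cross-page case above only 10 of the 15 bytes fit on the first page, which is why the original kvm_read_guest_virt_helper keeps its loop; the inlined fetch path trades that loop for the WARN_ON() truncation on the expectation that the caller never asks for a fetch that crosses a page boundary.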