@@ -6983,7 +6983,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
 	 * Cause the #SS fault with 0 error code in VM86 mode.
 	 */
 	if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
-		if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {
+		if (kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE) {
 			if (vcpu->arch.halt_request) {
 				vcpu->arch.halt_request = 0;
 				return kvm_vcpu_halt(vcpu);
@@ -7054,7 +7054,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
 
 	if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) {
 		WARN_ON_ONCE(!enable_vmware_backdoor);
-		er = emulate_instruction(vcpu,
+		er = kvm_emulate_instruction(vcpu,
 			EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL);
 		if (er == EMULATE_USER_EXIT)
 			return 0;
@@ -7157,7 +7157,7 @@ static int handle_io(struct kvm_vcpu *vcpu)
 	++vcpu->stat.io_exits;
 
 	if (string)
-		return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+		return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
 
 	port = exit_qualification >> 16;
 	size = (exit_qualification & 7) + 1;
@@ -7231,7 +7231,7 @@ static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
 static int handle_desc(struct kvm_vcpu *vcpu)
 {
 	WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP));
-	return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+	return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
 }
 
 static int handle_cr(struct kvm_vcpu *vcpu)
@@ -7480,7 +7480,7 @@ static int handle_vmcall(struct kvm_vcpu *vcpu)
 
 static int handle_invd(struct kvm_vcpu *vcpu)
 {
-	return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+	return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
 }
 
 static int handle_invlpg(struct kvm_vcpu *vcpu)
@@ -7547,7 +7547,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu)
 			return kvm_skip_emulated_instruction(vcpu);
 		}
 	}
-	return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+	return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
 }
 
 static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
@@ -7704,7 +7704,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
 	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
 		return kvm_skip_emulated_instruction(vcpu);
 	else
-		return emulate_instruction(vcpu, EMULTYPE_SKIP) ==
+		return kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) ==
 							EMULATE_DONE;
 }
 
@@ -7748,7 +7748,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
 		if (kvm_test_request(KVM_REQ_EVENT, vcpu))
 			return 1;
 
-		err = emulate_instruction(vcpu, 0);
+		err = kvm_emulate_instruction(vcpu, 0);
 
 		if (err == EMULATE_USER_EXIT) {
 			++vcpu->stat.mmio_exits;