@@ -7324,7 +7324,6 @@ static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	if (vmx->nested.current_vmptr == -1ull) {
 		nested_vmx_failInvalid(vcpu);
-		skip_emulated_instruction(vcpu);
 		return 0;
 	}
 	return 1;
@@ -7338,9 +7337,13 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
 	gva_t gva = 0;
 
-	if (!nested_vmx_check_permission(vcpu) ||
-	    !nested_vmx_check_vmcs12(vcpu))
+	if (!nested_vmx_check_permission(vcpu))
+		return 1;
+
+	if (!nested_vmx_check_vmcs12(vcpu)) {
+		skip_emulated_instruction(vcpu);
 		return 1;
+	}
 
 	/* Decode instruction info and find the field to read */
 	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
@@ -7388,10 +7391,14 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 	u64 field_value = 0;
 	struct x86_exception e;
 
-	if (!nested_vmx_check_permission(vcpu) ||
-	    !nested_vmx_check_vmcs12(vcpu))
+	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
+	if (!nested_vmx_check_vmcs12(vcpu)) {
+		skip_emulated_instruction(vcpu);
+		return 1;
+	}
+
 	if (vmx_instruction_info & (1u << 10))
 		field_value = kvm_register_readl(vcpu,
 			(((vmx_instruction_info) >> 3) & 0xf));
@@ -10046,11 +10053,12 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	bool ia32e;
 	u32 msr_entry_idx;
 
-	if (!nested_vmx_check_permission(vcpu) ||
-	    !nested_vmx_check_vmcs12(vcpu))
+	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
-	skip_emulated_instruction(vcpu);
+	if (!nested_vmx_check_vmcs12(vcpu))
+		goto out;
+
 	vmcs12 = get_vmcs12(vcpu);
 
 	if (enable_shadow_vmcs)
@@ -10070,33 +10078,33 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 		nested_vmx_failValid(vcpu,
 				     launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
 				     : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
-		return 1;
+		goto out;
 	}
 
 	if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
 	    vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT) {
 		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
-		return 1;
+		goto out;
 	}
 
 	if (!nested_get_vmcs12_pages(vcpu, vmcs12)) {
 		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
-		return 1;
+		goto out;
 	}
 
 	if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12)) {
 		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
-		return 1;
+		goto out;
 	}
 
 	if (nested_vmx_check_apicv_controls(vcpu, vmcs12)) {
 		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
-		return 1;
+		goto out;
 	}
 
 	if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12)) {
 		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
-		return 1;
+		goto out;
 	}
 
 	if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
@@ -10116,26 +10124,26 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 				vmx->nested.nested_vmx_entry_ctls_high))
 	{
 		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
-		return 1;
+		goto out;
 	}
 
 	if (((vmcs12->host_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) ||
 	    ((vmcs12->host_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
 		nested_vmx_failValid(vcpu,
 				     VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
-		return 1;
+		goto out;
 	}
 
 	if (!nested_cr0_valid(vcpu, vmcs12->guest_cr0) ||
 	    ((vmcs12->guest_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
 		nested_vmx_entry_failure(vcpu, vmcs12,
 			EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
-		return 1;
+		goto out;
 	}
 	if (vmcs12->vmcs_link_pointer != -1ull) {
 		nested_vmx_entry_failure(vcpu, vmcs12,
 			EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR);
-		return 1;
+		goto out;
 	}
 
 	/*
@@ -10155,7 +10163,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 		    ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) {
 			nested_vmx_entry_failure(vcpu, vmcs12,
 				EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
-			return 1;
+			goto out;
 		}
 	}
 
@@ -10173,7 +10181,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 		    ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) {
 			nested_vmx_entry_failure(vcpu, vmcs12,
 				EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
-			return 1;
+			goto out;
 		}
 	}
 
@@ -10186,6 +10194,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	if (!vmcs02)
 		return -ENOMEM;
 
+	skip_emulated_instruction(vcpu);
 	enter_guest_mode(vcpu);
 
 	if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
@@ -10227,6 +10236,10 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	 * the success flag) when L2 exits (see nested_vmx_vmexit()).
 	 */
 	return 1;
+
+out:
+	skip_emulated_instruction(vcpu);
+	return 1;
 }
 
 /*