@@ -10660,9 +10660,9 @@ static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12);

-static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
-				    struct vmcs12 *vmcs12)
+static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 {
+	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct page *page;
 	u64 hpa;

@@ -11774,12 +11774,17 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 	return 0;
 }

-static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
+/*
+ * If exit_qual is NULL, this is being called from RSM.
+ * Otherwise it's called from vmlaunch/vmresume.
+ */
+static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-	u32 exit_qual;
-	int r;
+	bool from_vmentry = !!exit_qual;
+	u32 dummy_exit_qual;
+	int r = 0;

 	enter_guest_mode(vcpu);

@@ -11793,17 +11798,28 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
 		vcpu->arch.tsc_offset += vmcs12->tsc_offset;

 	r = EXIT_REASON_INVALID_STATE;
-	if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
+	if (prepare_vmcs02(vcpu, vmcs12, from_vmentry ? exit_qual : &dummy_exit_qual))
 		goto fail;

-	nested_get_vmcs12_pages(vcpu, vmcs12);
+	if (from_vmentry) {
+		nested_get_vmcs12_pages(vcpu);

-	r = EXIT_REASON_MSR_LOAD_FAIL;
-	exit_qual = nested_vmx_load_msr(vcpu,
-					vmcs12->vm_entry_msr_load_addr,
-					vmcs12->vm_entry_msr_load_count);
-	if (exit_qual)
-		goto fail;
+		r = EXIT_REASON_MSR_LOAD_FAIL;
+		*exit_qual = nested_vmx_load_msr(vcpu,
+						 vmcs12->vm_entry_msr_load_addr,
+						 vmcs12->vm_entry_msr_load_count);
+		if (*exit_qual)
+			goto fail;
+	} else {
+		/*
+		 * The MMU is not initialized to point at the right entities yet and
+		 * "get pages" would need to read data from the guest (i.e. we will
+		 * need to perform gpa to hpa translation). Request a call
+		 * to nested_get_vmcs12_pages before the next VM-entry. The MSRs
+		 * have already been set at vmentry time and should not be reset.
+		 */
+		kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
+	}

 	/*
 	 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
@@ -11818,8 +11834,7 @@ fail:
 		vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
 	leave_guest_mode(vcpu);
 	vmx_switch_vmcs(vcpu, &vmx->vmcs01);
-	nested_vmx_entry_failure(vcpu, vmcs12, r, exit_qual);
-	return 1;
+	return r;
 }

 /*
@@ -11896,10 +11911,11 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
	 */

 	vmx->nested.nested_run_pending = 1;
-	ret = enter_vmx_non_root_mode(vcpu);
+	ret = enter_vmx_non_root_mode(vcpu, &exit_qual);
 	if (ret) {
+		nested_vmx_entry_failure(vcpu, vmcs12, ret, exit_qual);
 		vmx->nested.nested_run_pending = 0;
-		return ret;
+		return 1;
 	}

 	/*
@@ -12985,7 +13001,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)

 	if (vmx->nested.smm.guest_mode) {
 		vcpu->arch.hflags &= ~HF_SMM_MASK;
-		ret = enter_vmx_non_root_mode(vcpu);
+		ret = enter_vmx_non_root_mode(vcpu, NULL);
 		vcpu->arch.hflags |= HF_SMM_MASK;
 		if (ret)
 			return ret;
@@ -13134,6 +13150,8 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {

 	.setup_mce = vmx_setup_mce,

+	.get_vmcs12_pages = nested_get_vmcs12_pages,
+
 	.smi_allowed = vmx_smi_allowed,
 	.pre_enter_smm = vmx_pre_enter_smm,
 	.pre_leave_smm = vmx_pre_leave_smm,
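
Not shown in these vmx.c hunks: the KVM_REQ_GET_VMCS12_PAGES request and the .get_vmcs12_pages callback referenced above also need to be declared and serviced on the common x86 side. A minimal sketch of that wiring, assuming the request is handled from vcpu_enter_guest() and noting that the request number and exact placement are illustrative rather than taken from these hunks:

	/* arch/x86/include/asm/kvm_host.h (sketch): declare the new request and hook. */
	#define KVM_REQ_GET_VMCS12_PAGES	KVM_ARCH_REQ(24)	/* request number is illustrative */

	struct kvm_x86_ops {
		/* ... */
		void (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
	};

	/* arch/x86/kvm/x86.c, vcpu_enter_guest() (sketch): service the request once
	 * the MMU can translate vmcs12 gpa->hpa, i.e. just before the next VM-entry. */
	if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu))
		kvm_x86_ops->get_vmcs12_pages(vcpu);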