@@ -5772,22 +5772,27 @@ static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr)
 
 /*
  * Free all VMCSs saved for this vcpu, except the one pointed by
- * vmx->loaded_vmcs. These include the VMCSs in vmcs02_pool (except the one
- * currently used, if running L2), and vmcs01 when running L2.
+ * vmx->loaded_vmcs. We must be running L1, so vmx->loaded_vmcs
+ * must be &vmx->vmcs01.
  */
 static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx)
 {
 	struct vmcs02_list *item, *n;
+
+	WARN_ON(vmx->loaded_vmcs != &vmx->vmcs01);
 	list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) {
-		if (vmx->loaded_vmcs != &item->vmcs02)
-			free_loaded_vmcs(&item->vmcs02);
+		/*
+		 * Something will leak if the above WARN triggers.  Better than
+		 * a use-after-free.
+		 */
+		if (vmx->loaded_vmcs == &item->vmcs02)
+			continue;
+
+		free_loaded_vmcs(&item->vmcs02);
 		list_del(&item->list);
 		kfree(item);
+		vmx->nested.vmcs02_num--;
 	}
-	vmx->nested.vmcs02_num = 0;
-
-	if (vmx->loaded_vmcs != &vmx->vmcs01)
-		free_loaded_vmcs(&vmx->vmcs01);
 }
 
 /*
@@ -7557,13 +7562,31 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	vmx_complete_interrupts(vmx);
 }
 
+static void vmx_load_vmcs01(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	int cpu;
+
+	if (vmx->loaded_vmcs == &vmx->vmcs01)
+		return;
+
+	cpu = get_cpu();
+	vmx->loaded_vmcs = &vmx->vmcs01;
+	vmx_vcpu_put(vcpu);
+	vmx_vcpu_load(vcpu, cpu);
+	vcpu->cpu = cpu;
+	put_cpu();
+}
+
 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	free_vpid(vmx);
-	free_loaded_vmcs(vmx->loaded_vmcs);
+	leave_guest_mode(vcpu);
+	vmx_load_vmcs01(vcpu);
 	free_nested(vmx);
+	free_loaded_vmcs(vmx->loaded_vmcs);
 	kfree(vmx->guest_msrs);
 	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, vmx);
@@ -8721,7 +8744,6 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 			      unsigned long exit_qualification)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	int cpu;
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 
 	/* trying to cancel vmlaunch/vmresume is a bug */
@@ -8746,12 +8768,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 			       vmcs12->vm_exit_intr_error_code,
 			       KVM_ISA_VMX);
 
-	cpu = get_cpu();
-	vmx->loaded_vmcs = &vmx->vmcs01;
-	vmx_vcpu_put(vcpu);
-	vmx_vcpu_load(vcpu, cpu);
-	vcpu->cpu = cpu;
-	put_cpu();
+	vmx_load_vmcs01(vcpu);
 
 	vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS));
 	vm_exit_controls_init(vmx, vmcs_read32(VM_EXIT_CONTROLS));