@@ -2193,21 +2193,31 @@ out:
 	return ERR_PTR(err);
 }
 
+static void svm_clear_current_vmcb(struct vmcb *vmcb)
+{
+	int i;
+
+	for_each_online_cpu(i)
+		cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
+}
+
 static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
+	/*
+	 * The vmcb page can be recycled, causing a false negative in
+	 * svm_vcpu_load(). So, ensure that no logical CPU has this
+	 * vmcb page recorded as its current vmcb.
+	 */
+	svm_clear_current_vmcb(svm->vmcb);
+
 	__free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
 	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
 	__free_page(virt_to_page(svm->nested.hsave));
 	__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
 	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, svm);
-	/*
-	 * The vmcb page can be recycled, causing a false negative in
-	 * svm_vcpu_load(). So do a full IBPB now.
-	 */
-	indirect_branch_prediction_barrier();
 }
 
 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)