@@ -213,6 +213,12 @@ struct vcpu_svm {
 	} host;
 
 	u64 spec_ctrl;
+	/*
+	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
+	 * translated into the appropriate L2_CFG bits on the host to
+	 * perform speculative control.
+	 */
+	u64 virt_spec_ctrl;
 
 	u32 *msrpm;
 
@@ -2060,6 +2066,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 
 	vcpu->arch.microcode_version = 0x01000065;
 	svm->spec_ctrl = 0;
+	svm->virt_spec_ctrl = 0;
 
 	if (!init_event) {
 		svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
@@ -5557,7 +5564,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 * is no need to worry about the conditional branch over the wrmsr
 	 * being speculatively taken.
 	 */
-	x86_spec_ctrl_set_guest(svm->spec_ctrl);
+	x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
 
 	asm volatile (
 		"push %%" _ASM_BP "; \n\t"
@@ -5681,7 +5688,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
 		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
-	x86_spec_ctrl_restore_host(svm->spec_ctrl);
+	x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
 
 	reload_tss(vcpu);
 
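The extra argument exists so the host can translate the guest's VIRT_SPEC_CTRL.SSBD request around VMRUN. Below is a hedged sketch of that translation, not the actual arch/x86/kernel/cpu/bugs.c implementation: SPEC_CTRL_SSBD, MSR_AMD64_LS_CFG, x86_amd_ls_cfg_base, x86_amd_ls_cfg_ssbd_mask and wrmsrl() are real kernel symbols of that era, but the helper name and its exact logic are illustrative assumptions.

/*
 * Hedged sketch only -- not the real bugs.c code.  On AMD hosts
 * without native VIRT_SSBD support, the guest's VIRT_SPEC_CTRL.SSBD
 * bit has to be mirrored into the load-store configuration MSR while
 * the guest runs.  virt_ssbd_update_ls_cfg() is a hypothetical name.
 */
static void virt_ssbd_update_ls_cfg(u64 guest_virt_spec_ctrl)
{
	u64 msrval = x86_amd_ls_cfg_base;	/* host baseline value */

	/* Guest asked for SSBD via VIRT_SPEC_CTRL: set the host bit. */
	if (guest_virt_spec_ctrl & SPEC_CTRL_SSBD)
		msrval |= x86_amd_ls_cfg_ssbd_mask;

	wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

On the x86_spec_ctrl_restore_host() side, the same translation would run with the host's own SSBD policy in place of the guest value, clearing the LS_CFG bit again unless the host itself has SSBD enabled.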