@@ -5586,8 +5586,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 
 	clgi();
 
-	local_irq_enable();
-
 	/*
 	 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
 	 * it's non-zero. Since vmentry is serialising on affected CPUs, there
@@ -5596,6 +5594,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 */
 	x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
 
+	local_irq_enable();
+
 	asm volatile (
 		"push %%" _ASM_BP "; \n\t"
 		"mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
@@ -5718,12 +5718,12 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
 		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
-	x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
-
 	reload_tss(vcpu);
 
 	local_irq_disable();
 
+	x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
+
 	vcpu->arch.cr2 = svm->vmcb->save.cr2;
 	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
 	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;