@@ -700,7 +700,6 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 		if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
 			return 1;
 	}
-	kvm_put_guest_xcr0(vcpu);
 	vcpu->arch.xcr0 = xcr0;
 
 	if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
@@ -6590,8 +6589,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->prepare_guest_switch(vcpu);
 	if (vcpu->fpu_active)
 		kvm_load_guest_fpu(vcpu);
-	kvm_load_guest_xcr0(vcpu);
-
 	vcpu->mode = IN_GUEST_MODE;
 
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -6618,6 +6615,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		goto cancel_injection;
 	}
 
+	kvm_load_guest_xcr0(vcpu);
+
 	if (req_immediate_exit)
 		smp_send_reschedule(vcpu->cpu);
 
@@ -6667,6 +6666,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	smp_wmb();
 
+	kvm_put_guest_xcr0(vcpu);
+
 	/* Interrupt is enabled by handle_external_intr() */
 	kvm_x86_ops->handle_external_intr(vcpu);
 
@@ -7314,7 +7315,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 	 * and assume host would use all available bits.
 	 * Guest xcr0 would be loaded later.
 	 */
-	kvm_put_guest_xcr0(vcpu);
	vcpu->guest_fpu_loaded = 1;
 	__kernel_fpu_begin();
 	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
@@ -7323,8 +7323,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 {
-	kvm_put_guest_xcr0(vcpu);
-
 	if (!vcpu->guest_fpu_loaded) {
 		vcpu->fpu_counter = 0;
 		return;
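
For reference, the two helpers this patch relocates look roughly like the following in kernels of this vintage (a sketch approximating the era's arch/x86/kvm/x86.c; guard conditions and field names may differ slightly between stable trees). The point of moving the calls is to keep the guest's XCR0 loaded only inside the IRQs-disabled window around guest entry, so host code such as interrupt handlers never runs with a guest XCR0 live:

static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
{
	/* Switch XCR0 to the guest's value if the guest uses XSAVE. */
	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
	    !vcpu->guest_xcr0_loaded) {
		xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
		vcpu->guest_xcr0_loaded = 1;
	}
}

static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
{
	/* Restore the host's XCR0 on the way out of guest mode. */
	if (vcpu->guest_xcr0_loaded) {
		if (vcpu->arch.xcr0 != host_xcr0)
			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
		vcpu->guest_xcr0_loaded = 0;
	}
}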