@@ -6453,6 +6453,12 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
 	return 1;
 }
 
+static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
+{
+	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
+		!vcpu->arch.apf.halted);
+}
+
 static int vcpu_run(struct kvm_vcpu *vcpu)
 {
 	int r;
@@ -6461,8 +6467,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
 	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 
 	for (;;) {
-		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
-		    !vcpu->arch.apf.halted)
+		if (kvm_vcpu_running(vcpu))
 			r = vcpu_enter_guest(vcpu);
 		else
 			r = vcpu_block(kvm, vcpu);
@@ -7762,19 +7767,33 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 	kvm_mmu_invalidate_zap_all_pages(kvm);
 }
 
+static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
+{
+	if (!list_empty_careful(&vcpu->async_pf.done))
+		return true;
+
+	if (kvm_apic_has_events(vcpu))
+		return true;
+
+	if (vcpu->arch.pv.pv_unhalted)
+		return true;
+
+	if (atomic_read(&vcpu->arch.nmi_queued))
+		return true;
+
+	if (kvm_arch_interrupt_allowed(vcpu) &&
+	    kvm_cpu_has_interrupt(vcpu))
+		return true;
+
+	return false;
+}
+
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
 	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
 		kvm_x86_ops->check_nested_events(vcpu, false);
 
-	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
-		!vcpu->arch.apf.halted)
-		|| !list_empty_careful(&vcpu->async_pf.done)
-		|| kvm_apic_has_events(vcpu)
-		|| vcpu->arch.pv.pv_unhalted
-		|| atomic_read(&vcpu->arch.nmi_queued) ||
-		(kvm_arch_interrupt_allowed(vcpu) &&
-		 kvm_cpu_has_interrupt(vcpu));
+	return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
 }
 
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)