@@ -10411,6 +10411,11 @@ static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
 	return ((rvi & 0xf0) > (vppr & 0xf0));
 }
 
+static bool vmx_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
+{
+	return pi_test_on(vcpu_to_pi_desc(vcpu));
+}
+
 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
 {
 	if (!kvm_vcpu_apicv_active(vcpu))
@@ -14387,6 +14392,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
 	.sync_pir_to_irr = vmx_sync_pir_to_irr,
 	.deliver_posted_interrupt = vmx_deliver_posted_interrupt,
+	.dy_apicv_has_pending_interrupt = vmx_dy_apicv_has_pending_interrupt,
 
 	.set_tss_addr = vmx_set_tss_addr,
 	.set_identity_map_addr = vmx_set_identity_map_addr,
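
For context, the new callback only reports whether the vCPU's posted-interrupt descriptor has its Outstanding Notification (ON) bit set, i.e. whether an interrupt has been posted to the vCPU but not yet synced into its virtual IRR. Below is a minimal stand-alone C sketch of that check; the descriptor layout is simplified and the field names are illustrative, not copied from the kernel sources touched by this diff.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bit 0 of the control word, i.e. bit 256 of the 64-byte descriptor. */
#define POSTED_INTR_ON 0

/* Simplified view of the APICv posted-interrupt descriptor (assumed layout). */
struct pi_desc {
	uint32_t pir[8];   /* posted-interrupt requests, one bit per vector */
	uint32_t control;  /* ON (outstanding notification) lives in bit 0 */
	uint32_t rsvd[7];
};

/* The check vmx_dy_apicv_has_pending_interrupt() performs via pi_test_on():
 * has an interrupt been posted that the vCPU has not yet consumed? */
static bool pi_test_on(const struct pi_desc *pi_desc)
{
	return pi_desc->control & (1u << POSTED_INTR_ON);
}

int main(void)
{
	struct pi_desc desc = { .control = 1u << POSTED_INTR_ON };

	printf("pending posted interrupt: %s\n",
	       pi_test_on(&desc) ? "yes" : "no");
	return 0;
}

Compiled on its own, this prints "pending posted interrupt: yes" for a descriptor whose ON bit is set, mirroring what pi_test_on(vcpu_to_pi_desc(vcpu)) returns in the hunk above.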