@@ -2909,7 +2909,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
 				    struct kvm_lapic_state *s)
 {
-	if (vcpu->arch.apicv_active)
+	if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active)
 		kvm_x86_ops->sync_pir_to_irr(vcpu);
 
 	return kvm_apic_get_state(vcpu, s);
@@ -6659,7 +6659,7 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 	if (irqchip_split(vcpu->kvm))
 		kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
 	else {
-		if (vcpu->arch.apicv_active)
+		if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active)
			kvm_x86_ops->sync_pir_to_irr(vcpu);
 		kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
 	}
@@ -6822,11 +6822,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		 * Update architecture specific hints for APIC
 		 * virtual interrupt delivery.
 		 */
-		if (vcpu->arch.apicv_active) {
+		if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active)
 			kvm_x86_ops->sync_pir_to_irr(vcpu);
-			kvm_x86_ops->hwapic_irr_update(vcpu,
-					kvm_lapic_find_highest_irr(vcpu));
-		}
 	}
 
 	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {