@@ -1032,12 +1032,6 @@ static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
 	vgic_ops->set_lr(vcpu, lr, vlr);
 }
 
-static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
-			       struct vgic_lr vlr)
-{
-	vgic_ops->sync_lr_elrsr(vcpu, lr, vlr);
-}
-
 static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
 {
 	return vgic_ops->get_elrsr(vcpu);
@@ -1100,7 +1094,6 @@ static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu)
 
 	vlr.state = 0;
 	vgic_set_lr(vcpu, lr_nr, vlr);
-	vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
 }
 
 /*
@@ -1162,7 +1155,6 @@ static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
 	}
 
 	vgic_set_lr(vcpu, lr_nr, vlr);
-	vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
 }
 
 /*
@@ -1340,8 +1332,6 @@ static int process_queued_irq(struct kvm_vcpu *vcpu,
 	vlr.hwirq = 0;
 	vgic_set_lr(vcpu, lr, vlr);
 
-	vgic_sync_lr_elrsr(vcpu, lr, vlr);
-
 	return pending;
 }
 
@@ -1442,8 +1432,6 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 	bool level_pending;
 
 	level_pending = vgic_process_maintenance(vcpu);
-	elrsr = vgic_get_elrsr(vcpu);
-	elrsr_ptr = u64_to_bitmask(&elrsr);
 
 	/* Deal with HW interrupts, and clear mappings for empty LRs */
 	for (lr = 0; lr < vgic->nr_lr; lr++) {
@@ -1454,6 +1442,8 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 	}
 
 	/* Check if we still have something up our sleeve... */
+	elrsr = vgic_get_elrsr(vcpu);
+	elrsr_ptr = u64_to_bitmask(&elrsr);
 	pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
 	if (level_pending || pending < vgic->nr_lr)
 		set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
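
For illustration only, not part of the patch: a minimal userspace sketch of
the ELRSR scan that the last hunk moves below the LR loop, so the bitmap is
read only after the loop has retired any empty LRs. first_zero_bit() is a
stand-in for the kernel's find_first_zero_bit(), and NR_LR is a made-up LR
count. A set bit means the LR is empty; a clear bit means it still holds an
interrupt, so the vcpu must be re-flagged as having work pending.

#include <stdint.h>
#include <stdio.h>

#define NR_LR 4	/* hypothetical number of list registers */

/* Stand-in for find_first_zero_bit(): first clear bit, or size if none. */
static unsigned int first_zero_bit(uint64_t mask, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++)
		if (!(mask & (1ULL << i)))
			return i;
	return size;
}

int main(void)
{
	uint64_t elrsr = 0xb;	/* bits 0, 1, 3 set: only LR2 is occupied */
	unsigned int pending = first_zero_bit(elrsr, NR_LR);

	if (pending < NR_LR)
		printf("LR%u still in use -> flag vcpu as pending\n", pending);
	else
		printf("all LRs empty\n");
	return 0;
}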