@@ -1081,6 +1081,7 @@ epilog:
 static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 {
 	u32 status = vgic_get_interrupt_status(vcpu);
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 	bool level_pending = false;
 
 	kvm_debug("STATUS = %08x\n", status);
@@ -1098,6 +1099,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 			struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
 			WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
 
+			spin_lock(&dist->lock);
 			vgic_irq_clear_queued(vcpu, vlr.irq);
 			WARN_ON(vlr.state & LR_STATE_MASK);
 			vlr.state = 0;
@@ -1125,6 +1127,8 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 				vgic_cpu_irq_clear(vcpu, vlr.irq);
 			}
 
+			spin_unlock(&dist->lock);
+
 			/*
 			 * Despite being EOIed, the LR may not have
 			 * been marked as empty.
@@ -1139,10 +1143,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 	return level_pending;
 }
 
-/*
- * Sync back the VGIC state after a guest run. The distributor lock is
- * needed so we don't get preempted in the middle of the state processing.
- */
+/* Sync back the VGIC state after a guest run */
 static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
@@ -1189,14 +1190,10 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 
 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
-	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
	if (!irqchip_in_kernel(vcpu->kvm))
 		return;
 
-	spin_lock(&dist->lock);
 	__kvm_vgic_sync_hwstate(vcpu);
-	spin_unlock(&dist->lock);
 }
 
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
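
The effect of the patch is to shrink the distributor lock's critical section: instead of holding dist->lock across all of __kvm_vgic_sync_hwstate(), the lock is now taken only around the per-IRQ distributor-state update for each EOIed level interrupt. Below is a minimal userspace sketch of that same narrow-locking pattern; it is illustrative only, and dist_state, process_eoi, and the pthread mutex are stand-ins chosen for this sketch, not kernel API.

#include <pthread.h>
#include <stdio.h>

/* Stand-in for the shared distributor state that dist->lock protects. */
struct dist_state {
	pthread_mutex_t lock;
	int queued[8];
};

static struct dist_state dist = { .lock = PTHREAD_MUTEX_INITIALIZER };

/*
 * Narrow critical section, mirroring the spin_lock()/spin_unlock() pair
 * moved into vgic_process_maintenance() above: the lock is held only
 * while shared per-IRQ state is touched.
 */
static void process_eoi(int irq)
{
	pthread_mutex_lock(&dist.lock);
	dist.queued[irq] = 0;		/* cf. vgic_irq_clear_queued() */
	pthread_mutex_unlock(&dist.lock);

	/*
	 * Work that only touches this vCPU's own state (e.g. clearing
	 * the list register) can now run without the lock held.
	 */
}

int main(void)
{
	dist.queued[3] = 1;
	process_eoi(3);
	printf("queued[3] = %d\n", dist.queued[3]);
	return 0;
}

Keeping the lock scope this tight avoids serializing the entire sync path across vCPUs while still protecting the shared per-IRQ bookkeeping.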