@@ -313,27 +313,6 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 
 	spin_lock_irqsave(&irq->irq_lock, flags);
 
-	/*
-	 * If this virtual IRQ was written into a list register, we
-	 * have to make sure the CPU that runs the VCPU thread has
-	 * synced back the LR state to the struct vgic_irq.
-	 *
-	 * As long as the conditions below are true, we know the VCPU thread
-	 * may be on its way back from the guest (we kicked the VCPU thread in
-	 * vgic_change_active_prepare) and still has to sync back this IRQ,
-	 * so we release and re-acquire the spin_lock to let the other thread
-	 * sync back the IRQ.
-	 *
-	 * When accessing VGIC state from user space, requester_vcpu is
-	 * NULL, which is fine, because we guarantee that no VCPUs are running
-	 * when accessing VGIC state from user space so irq->vcpu->cpu is
-	 * always -1.
-	 */
-	while (irq->vcpu && /* IRQ may have state in an LR somewhere */
-	       irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
-	       irq->vcpu->cpu != -1) /* VCPU thread is running */
-		cond_resched_lock(&irq->irq_lock);
-
 	if (irq->hw) {
 		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
 	} else {
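
For readers unfamiliar with the loop being removed: cond_resched_lock()
drops the spinlock and reschedules if needed, then re-acquires the lock,
so the wait above does not hold irq->irq_lock across the whole spin and
the VCPU thread can take the lock to sync its LR state back. As context
only, here is a minimal user-space analogue of that pattern, assuming
POSIX threads; vcpu_thread and synced_back are hypothetical stand-ins
for the VCPU thread and the "LR state synced back" condition, not code
from this patch.

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

/* Shared state guarded by the lock, standing in for struct vgic_irq. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool synced_back; /* stands in for "LR state synced back" */

/* Stand-in for the VCPU thread: it needs the lock to publish its state. */
static void *vcpu_thread(void *arg)
{
	pthread_mutex_lock(&lock);
	synced_back = true;	/* sync state back under the lock */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_mutex_lock(&lock);
	pthread_create(&t, NULL, vcpu_thread, NULL);

	/*
	 * Analogue of the removed loop: while the other thread still has
	 * state to sync back, release the lock, yield the CPU, and
	 * re-acquire the lock, so the other thread can make progress.
	 * In the kernel, cond_resched_lock() does the unlock/resched/
	 * relock dance in one call, and only when a resched is due.
	 */
	while (!synced_back) {
		pthread_mutex_unlock(&lock);
		sched_yield();
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	printf("state synced back; safe to change active state\n");
	return 0;
}

The kernel loop exits on three conditions (no LR state, requester is the
VCPU thread itself, or the VCPU thread is not running) instead of a
simple flag, but the shape is the same: never spin on a condition while
continuously holding the lock the other side needs to satisfy it.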