@@ -53,6 +53,10 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
  *   vcpuX->vcpu_id < vcpuY->vcpu_id:
  *     spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
  *     spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
+ *
+ * Since the VGIC must support injecting virtual interrupts from ISRs, we have
+ * to use the spin_lock_irqsave/spin_unlock_irqrestore versions of outer
+ * spinlocks for any lock that may be taken while injecting an interrupt.
  */
 
 /*
@@ -261,7 +265,8 @@ static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owne
  * Needs to be entered with the IRQ lock already held, but will return
  * with all locks dropped.
  */
-bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq)
+bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
+			   unsigned long flags)
 {
 	struct kvm_vcpu *vcpu;
 
@@ -279,7 +284,7 @@ retry:
 		 * not need to be inserted into an ap_list and there is also
 		 * no more work for us to do.
 		 */
-		spin_unlock(&irq->irq_lock);
+		spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 		/*
 		 * We have to kick the VCPU here, because we could be
@@ -301,11 +306,11 @@ retry:
 	 * We must unlock the irq lock to take the ap_list_lock where
 	 * we are going to insert this new pending interrupt.
 	 */
-	spin_unlock(&irq->irq_lock);
+	spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 	/* someone can do stuff here, which we re-check below */
 
-	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
+	spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
 	spin_lock(&irq->irq_lock);
 
 	/*
@@ -322,9 +327,9 @@ retry:
 
 	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
 		spin_unlock(&irq->irq_lock);
-		spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+		spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
 
-		spin_lock(&irq->irq_lock);
+		spin_lock_irqsave(&irq->irq_lock, flags);
 		goto retry;
 	}
 
@@ -337,7 +342,7 @@ retry:
 	irq->vcpu = vcpu;
 
 	spin_unlock(&irq->irq_lock);
-	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+	spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
 
 	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 	kvm_vcpu_kick(vcpu);
@@ -367,6 +372,7 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
 {
 	struct kvm_vcpu *vcpu;
 	struct vgic_irq *irq;
+	unsigned long flags;
 	int ret;
 
 	trace_vgic_update_irq_pending(cpuid, intid, level);
@@ -383,11 +389,11 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
 	if (!irq)
 		return -EINVAL;
 
-	spin_lock(&irq->irq_lock);
+	spin_lock_irqsave(&irq->irq_lock, flags);
 
 	if (!vgic_validate_injection(irq, level, owner)) {
 		/* Nothing to see here, move along... */
-		spin_unlock(&irq->irq_lock);
+		spin_unlock_irqrestore(&irq->irq_lock, flags);
 		vgic_put_irq(kvm, irq);
 		return 0;
 	}
@@ -397,7 +403,7 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
 	else
 		irq->pending_latch = true;
 
-	vgic_queue_irq_unlock(kvm, irq);
+	vgic_queue_irq_unlock(kvm, irq, flags);
 	vgic_put_irq(kvm, irq);
 
 	return 0;
@@ -406,15 +412,16 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
 int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq)
 {
 	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
+	unsigned long flags;
 
 	BUG_ON(!irq);
 
-	spin_lock(&irq->irq_lock);
+	spin_lock_irqsave(&irq->irq_lock, flags);
 
 	irq->hw = true;
 	irq->hwintid = phys_irq;
 
-	spin_unlock(&irq->irq_lock);
+	spin_unlock_irqrestore(&irq->irq_lock, flags);
 	vgic_put_irq(vcpu->kvm, irq);
 
 	return 0;
@@ -423,6 +430,7 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq)
 int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
 {
 	struct vgic_irq *irq;
+	unsigned long flags;
 
 	if (!vgic_initialized(vcpu->kvm))
 		return -EAGAIN;
@@ -430,12 +438,12 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
 	irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
 	BUG_ON(!irq);
 
-	spin_lock(&irq->irq_lock);
+	spin_lock_irqsave(&irq->irq_lock, flags);
 
 	irq->hw = false;
 	irq->hwintid = 0;
 
-	spin_unlock(&irq->irq_lock);
+	spin_unlock_irqrestore(&irq->irq_lock, flags);
 	vgic_put_irq(vcpu->kvm, irq);
 
 	return 0;
@@ -486,9 +494,10 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_irq *irq, *tmp;
+	unsigned long flags;
 
 retry:
-	spin_lock(&vgic_cpu->ap_list_lock);
+	spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
 
 	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
 		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
@@ -528,7 +537,7 @@ retry:
 		/* This interrupt looks like it has to be migrated. */
 
 		spin_unlock(&irq->irq_lock);
-		spin_unlock(&vgic_cpu->ap_list_lock);
+		spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
 
 		/*
 		 * Ensure locking order by always locking the smallest
@@ -542,7 +551,7 @@ retry:
 			vcpuB = vcpu;
 		}
 
-		spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
+		spin_lock_irqsave(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
 		spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
 				 SINGLE_DEPTH_NESTING);
 		spin_lock(&irq->irq_lock);
@@ -566,11 +575,11 @@ retry:
 
 		spin_unlock(&irq->irq_lock);
 		spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
-		spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
+		spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
 		goto retry;
 	}
 
-	spin_unlock(&vgic_cpu->ap_list_lock);
+	spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
 }
 
 static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
@@ -703,6 +712,8 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
 		return;
 
+	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
+
 	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
 	vgic_flush_lr_state(vcpu);
 	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
@@ -735,11 +746,12 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_irq *irq;
 	bool pending = false;
+	unsigned long flags;
 
 	if (!vcpu->kvm->arch.vgic.enabled)
 		return false;
 
-	spin_lock(&vgic_cpu->ap_list_lock);
+	spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
 
 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
 		spin_lock(&irq->irq_lock);
@@ -750,7 +762,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 			break;
 	}
 
-	spin_unlock(&vgic_cpu->ap_list_lock);
+	spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
 
 	return pending;
 }
@@ -776,13 +788,14 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq)
 {
 	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
 	bool map_is_active;
+	unsigned long flags;
 
 	if (!vgic_initialized(vcpu->kvm))
 		return false;
 
-	spin_lock(&irq->irq_lock);
+	spin_lock_irqsave(&irq->irq_lock, flags);
 	map_is_active = irq->hw && irq->active;
-	spin_unlock(&irq->irq_lock);
+	spin_unlock_irqrestore(&irq->irq_lock, flags);
 	vgic_put_irq(vcpu->kvm, irq);
 
 	return map_is_active;
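
The comment added in the first hunk states the rule the rest of the diff applies: any outer vgic lock that can also be taken while injecting an interrupt from an ISR must be acquired with the _irqsave variants in process context. Below is a minimal, self-contained sketch of that pattern; it is not part of the patch, and demo_lock, demo_inject() and demo_isr() are made-up names used purely for illustration.

/*
 * Sketch only: a lock shared between process context and a hardirq
 * handler.  The process-context side must disable local interrupts
 * while holding the lock, or the handler can interrupt the holder on
 * the same CPU and spin on the lock forever (self-deadlock).
 */
#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(demo_lock);

/* Process-context path, e.g. an ioctl that injects an interrupt. */
static void demo_inject(void)
{
	unsigned long flags;

	/* Save the IRQ state and keep interrupts off while the lock is held. */
	spin_lock_irqsave(&demo_lock, flags);
	/* ... queue the interrupt ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}

/* Hardirq path, e.g. a forwarded physical interrupt firing. */
static irqreturn_t demo_isr(int irq, void *dev_id)
{
	/* Interrupts are already disabled in hardirq context, so the plain
	 * spin_lock() is sufficient on this side. */
	spin_lock(&demo_lock);
	/* ... mark the interrupt pending ... */
	spin_unlock(&demo_lock);

	return IRQ_HANDLED;
}

The same reasoning explains why the hunk in kvm_vgic_flush_hwstate() adds DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()) instead of converting that path to irqsave: interrupts are already disabled on the VCPU entry path, and the assertion documents that assumption rather than paying for an extra save/restore.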