@@ -495,6 +495,32 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
 	return ret;
 }
 
+/**
+ * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ
+ * @vcpu: The VCPU pointer
+ * @vintid: The INTID of the interrupt
+ *
+ * Reset the active and pending states of a mapped interrupt. Kernel
+ * subsystems injecting mapped interrupts should reset their interrupt lines
+ * when we are doing a reset of the VM.
+ */
+void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
+{
+	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
+	unsigned long flags;
+
+	if (!irq->hw)
+		goto out;
+
+	spin_lock_irqsave(&irq->irq_lock, flags);
+	irq->active = false;
+	irq->pending_latch = false;
+	irq->line_level = false;
+	spin_unlock_irqrestore(&irq->irq_lock, flags);
+out:
+	vgic_put_irq(vcpu->kvm, irq);
+}
+
 int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
 {
 	struct vgic_irq *irq;
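
For context, a consumer of the new helper would look roughly like the
sketch below. This is a hypothetical caller, not part of this hunk; the
function name and the vtimer->irq.irq field are illustrative stand-ins
for whatever mapped-interrupt state a subsystem such as the arch timer
keeps.

	/* Hypothetical caller sketch, not part of this patch */
	static void example_timer_vcpu_reset(struct kvm_vcpu *vcpu,
					     struct arch_timer_context *vtimer)
	{
		/* Mapped IRQs only exist with an in-kernel irqchip */
		if (irqchip_in_kernel(vcpu->kvm))
			kvm_vgic_reset_mapped_irq(vcpu, vtimer->irq.irq);
	}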
@@ -684,22 +710,37 @@ static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
 	vgic_v3_set_underflow(vcpu);
 }
 
+static inline void vgic_set_npie(struct kvm_vcpu *vcpu)
+{
+	if (kvm_vgic_global_state.type == VGIC_V2)
+		vgic_v2_set_npie(vcpu);
+	else
+		vgic_v3_set_npie(vcpu);
+}
+
 /* Requires the ap_list_lock to be held. */
-static int compute_ap_list_depth(struct kvm_vcpu *vcpu)
+static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
+				 bool *multi_sgi)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_irq *irq;
 	int count = 0;
 
+	*multi_sgi = false;
+
 	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
 
 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
 		spin_lock(&irq->irq_lock);
 		/* GICv2 SGIs can count for more than one... */
-		if (vgic_irq_is_sgi(irq->intid) && irq->source)
-			count += hweight8(irq->source);
-		else
+		if (vgic_irq_is_sgi(irq->intid) && irq->source) {
+			int w = hweight8(irq->source);
+
+			count += w;
+			*multi_sgi |= (w > 1);
+		} else {
 			count++;
+		}
 		spin_unlock(&irq->irq_lock);
 	}
 	return count;
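
The counting above is what makes a GICv2 multi-source SGI expensive:
each set bit in irq->source is a distinct requesting CPU and needs its
own list register. A minimal user-space illustration of that logic,
with hweight8() re-implemented here as a plain popcount since the
kernel helper is not available outside the kernel:

	#include <stdbool.h>
	#include <stdio.h>

	/* Same semantics as the kernel's hweight8(): count the set bits */
	static int hweight8(unsigned char w)
	{
		int bits = 0;

		while (w) {
			bits += w & 1;
			w >>= 1;
		}
		return bits;
	}

	int main(void)
	{
		unsigned char source = 0x29;	/* SGI pending from CPUs 0, 3 and 5 */
		bool multi_sgi = false;
		int count = 0;
		int w = hweight8(source);

		count += w;		/* one LR slot per source CPU */
		multi_sgi |= (w > 1);

		/* prints: count=3 multi_sgi=1 */
		printf("count=%d multi_sgi=%d\n", count, multi_sgi);
		return 0;
	}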
@@ -710,28 +751,43 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_irq *irq;
-	int count = 0;
+	int count;
+	bool npie = false;
+	bool multi_sgi;
+	u8 prio = 0xff;
 
 	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
 
-	if (compute_ap_list_depth(vcpu) > kvm_vgic_global_state.nr_lr)
+	count = compute_ap_list_depth(vcpu, &multi_sgi);
+	if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
 		vgic_sort_ap_list(vcpu);
 
+	count = 0;
+
 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
 		spin_lock(&irq->irq_lock);
 
-		if (unlikely(vgic_target_oracle(irq) != vcpu))
-			goto next;
-
 		/*
-		 * If we get an SGI with multiple sources, try to get
-		 * them in all at once.
+		 * If we have multi-SGIs in the pipeline, we need to
+		 * guarantee that they are all seen before any IRQ of
+		 * lower priority. In that case, we need to filter out
+		 * these interrupts by exiting early. This is easy as
+		 * the AP list has been sorted already.
 		 */
-		do {
+		if (multi_sgi && irq->priority > prio) {
+			spin_unlock(&irq->irq_lock);
+			break;
+		}
+
+		if (likely(vgic_target_oracle(irq) == vcpu)) {
 			vgic_populate_lr(vcpu, irq, count++);
-		} while (irq->source && count < kvm_vgic_global_state.nr_lr);
 
-next:
+			if (irq->source) {
+				npie = true;
+				prio = irq->priority;
+			}
+		}
+
 		spin_unlock(&irq->irq_lock);
 
 		if (count == kvm_vgic_global_state.nr_lr) {
@@ -742,6 +798,9 @@ next:
 		}
 	}
 
+	if (npie)
+		vgic_set_npie(vcpu);
+
 	vcpu->arch.vgic_cpu.used_lrs = count;
 
 	/* Nuke remaining LRs */
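
The vgic_v2_set_npie()/vgic_v3_set_npie() helpers used above are added
elsewhere in this series and are not shown in this diff. A rough sketch
of the GICv2 side, assuming the usual cpu-interface layout and that
NPIE is bit 3 of GICH_HCR as in the GICv2 architecture: arming the "no
pending interrupt" maintenance interrupt brings us back into the flush
path once the queued LRs have drained, so the SGI sources that were
held back can be injected then.

	/* Sketch only -- the real helper lives in vgic-v2.c */
	#define GICH_HCR_NPIE		(1 << 3)	/* assumed bit position */

	void vgic_v2_set_npie(struct kvm_vcpu *vcpu)
	{
		struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;

		/* Maintenance interrupt fires once no pending LR remains */
		cpuif->vgic_hcr |= GICH_HCR_NPIE;
	}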