@@ -22,59 +22,17 @@
 
 #include "vgic.h"
 
-/*
- * Call this function to convert a u64 value to an unsigned long * bitmask
- * in a way that works on both 32-bit and 64-bit LE and BE platforms.
- *
- * Warning: Calling this function may modify *val.
- */
-static unsigned long *u64_to_bitmask(u64 *val)
-{
-#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
-	*val = (*val >> 32) | (*val << 32);
-#endif
-	return (unsigned long *)val;
-}
-
-void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu)
+void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
 
-	if (cpuif->vgic_misr & GICH_MISR_EOI) {
-		u64 eisr = cpuif->vgic_eisr;
-		unsigned long *eisr_bmap = u64_to_bitmask(&eisr);
-		int lr;
-
-		for_each_set_bit(lr, eisr_bmap, kvm_vgic_global_state.nr_lr) {
-			u32 intid = cpuif->vgic_lr[lr] & GICH_LR_VIRTUALID;
-
-			WARN_ON(cpuif->vgic_lr[lr] & GICH_LR_STATE);
-
-			/* Only SPIs require notification */
-			if (vgic_valid_spi(vcpu->kvm, intid))
-				kvm_notify_acked_irq(vcpu->kvm, 0,
-						     intid - VGIC_NR_PRIVATE_IRQS);
-		}
-	}
-
-	/* check and disable underflow maintenance IRQ */
-	cpuif->vgic_hcr &= ~GICH_HCR_UIE;
-
-	/*
-	 * In the next iterations of the vcpu loop, if we sync the
-	 * vgic state after flushing it, but before entering the guest
-	 * (this happens for pending signals and vmid rollovers), then
-	 * make sure we don't pick up any old maintenance interrupts
-	 * here.
-	 */
-	cpuif->vgic_eisr = 0;
+	cpuif->vgic_hcr |= GICH_HCR_UIE;
 }
 
-void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
+static bool lr_signals_eoi_mi(u32 lr_val)
 {
-	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
-
-	cpuif->vgic_hcr |= GICH_HCR_UIE;
+	return !(lr_val & GICH_LR_STATE) && (lr_val & GICH_LR_EOI) &&
+	       !(lr_val & GICH_LR_HW);
}
 
 /*
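
For review context: lr_signals_eoi_mi() is what replaces the old EISR scan above. An LR asks for an EOI notification exactly when its state field has gone invalid (neither pending nor active), the EOI maintenance bit was set when the interrupt was queued, and the LR is not hardware-mapped (HW LRs never raise the EOI maintenance interrupt). The following standalone sketch exercises the same bit test; the GICH_LR_* values are copied from include/linux/irqchip/arm-gic.h for illustration, and the test values in main() are made up:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GICH_LR_VIRTUALID	(0x3ffU << 0)	/* virtual INTID, bits [9:0] */
#define GICH_LR_EOI		(1U << 19)	/* EOI maintenance requested */
#define GICH_LR_STATE		(3U << 28)	/* pending and active bits */
#define GICH_LR_HW		(1U << 31)	/* hardware-mapped interrupt */

static bool lr_signals_eoi_mi(uint32_t lr_val)
{
	return !(lr_val & GICH_LR_STATE) && (lr_val & GICH_LR_EOI) &&
	       !(lr_val & GICH_LR_HW);
}

int main(void)
{
	uint32_t eoied = GICH_LR_EOI | 42;		  /* invalid, EOI set */
	uint32_t pending = GICH_LR_EOI | (1U << 28) | 42; /* still pending */

	printf("eoied=%d intid=%u pending=%d\n",
	       lr_signals_eoi_mi(eoied), eoied & GICH_LR_VIRTUALID,
	       lr_signals_eoi_mi(pending));
	return 0;	/* prints "eoied=1 intid=42 pending=0" */
}
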
@@ -89,11 +47,18 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
 	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
 	int lr;
 
+	cpuif->vgic_hcr &= ~GICH_HCR_UIE;
+
 	for (lr = 0; lr < vcpu->arch.vgic_cpu.used_lrs; lr++) {
 		u32 val = cpuif->vgic_lr[lr];
 		u32 intid = val & GICH_LR_VIRTUALID;
 		struct vgic_irq *irq;
 
+		/* Notify fds when the guest EOI'ed a level-triggered SPI */
+		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
+			kvm_notify_acked_irq(vcpu->kvm, 0,
+					     intid - VGIC_NR_PRIVATE_IRQS);
+
 		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
 
 		spin_lock(&irq->irq_lock);
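
Taken together, the two hunks make the underflow maintenance interrupt a plain request/acknowledge pair: vgic_v2_set_underflow() sets GICH_HCR_UIE when the flush path has more pending interrupts than LRs, and vgic_v2_fold_lr_state() now clears it unconditionally on every sync. EOI notification is likewise derived from the LR contents themselves, so nothing reads MISR or EISR any more, and the u64_to_bitmask() helper deleted in the first hunk loses its only user. For reference, that helper's word swap existed because the kernel's bitmap helpers index bits within unsigned longs: on a 32-bit big-endian platform the low 32 bits of a u64 sit in the second long, so a bare cast would misplace every bit. A hypothetical userspace demo of the pitfall, with compiler predefines standing in for CONFIG_CPU_BIG_ENDIAN and BITS_PER_LONG:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the deleted kernel helper; like it, this may modify *val. */
static unsigned long *u64_to_bitmask(uint64_t *val)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ && \
	__SIZEOF_LONG__ == 4
	*val = (*val >> 32) | (*val << 32);	/* swap the 32-bit halves */
#endif
	return (unsigned long *)val;
}

int main(void)
{
	uint64_t eisr = 1;	/* pretend only LR 0 signalled an EOI */
	unsigned long *bmap = u64_to_bitmask(&eisr);

	/* Prints 1 on LE and BE alike; without the swap, a 32-bit BE
	 * build would look at bit 0 of the high word instead. */
	printf("LR0 set: %lu\n", bmap[0] & 1UL);
	return 0;
}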