@@ -34,6 +34,9 @@
 #include <asm/kvm.h>
 #include <kvm/iodev.h>
 
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
 /*
  * How the whole thing works (courtesy of Christoffer Dall):
  *
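Defining CREATE_TRACE_POINTS before including "trace.h" is the standard tracepoint idiom: exactly one compilation unit may do this, so the event bodies are instantiated once; every other user includes the header plainly. Below is a minimal sketch of the trace.h this patch relies on, built around the trace_vgic_update_irq_pending() call added further down; the event fields are an assumption derived from that call's arguments, not necessarily the file's exact contents.

	#if !defined(_TRACE_VGIC_H) || defined(TRACE_HEADER_MULTI_READ)
	#define _TRACE_VGIC_H

	#include <linux/tracepoint.h>

	#undef TRACE_SYSTEM
	#define TRACE_SYSTEM kvm

	TRACE_EVENT(vgic_update_irq_pending,
		TP_PROTO(unsigned long vcpu_id, __u32 irq, bool level),
		TP_ARGS(vcpu_id, irq, level),

		TP_STRUCT__entry(
			__field(unsigned long, vcpu_id)
			__field(__u32, irq)
			__field(bool, level)
		),

		TP_fast_assign(
			__entry->vcpu_id = vcpu_id;
			__entry->irq = irq;
			__entry->level = level;
		),

		TP_printk("VCPU: %lu, IRQ %u, level: %d",
			  __entry->vcpu_id, __entry->irq, __entry->level)
	);

	#endif /* _TRACE_VGIC_H */

	/* This part must be outside protection */
	#undef TRACE_INCLUDE_PATH
	#define TRACE_INCLUDE_PATH ../../../virt/kvm/arm
	#undef TRACE_INCLUDE_FILE
	#define TRACE_INCLUDE_FILE trace
	#include <trace/define_trace.h>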
@@ -102,11 +105,13 @@
 #include "vgic.h"
 
 static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
-static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
+static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu);
 static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
 static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
+static u64 vgic_get_elrsr(struct kvm_vcpu *vcpu);
 static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
						int virt_irq);
+static int compute_pending_for_cpu(struct kvm_vcpu *vcpu);
 
 static const struct vgic_ops *vgic_ops;
 static const struct vgic_params *vgic;
@@ -357,6 +362,11 @@ static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq)
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 
 	vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0);
+	if (!vgic_dist_irq_get_level(vcpu, irq)) {
+		vgic_dist_irq_clear_pending(vcpu, irq);
+		if (!compute_pending_for_cpu(vcpu))
+			clear_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
+	}
 }
 
 static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
@@ -654,10 +664,9 @@ bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
 	vgic_reg_access(mmio, &val, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
 	if (mmio->is_write) {
-		if (offset < 8) {
-			*reg = ~0U; /* Force PPIs/SGIs to 1 */
+		/* Ignore writes to read-only SGI and PPI bits */
+		if (offset < 8)
 			return false;
-		}
 
 		val = vgic_cfg_compress(val);
 		if (offset & 4) {
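Background for the offset < 8 check: GICD_ICFGR allocates two configuration bits per interrupt, of which only the odd (upper) bit selects edge versus level, and the first two 32-bit words (offsets 0 and 4) cover the 32 private SGIs and PPIs, so writes below offset 8 are simply discarded. The vgic_cfg_compress() call below the check folds the register into one bit per interrupt; a hypothetical standalone sketch of that folding (illustrative, not the file's exact helper):

	/* Collapse a 32-bit ICFGR word (2 bits per IRQ, 16 IRQs) into a
	 * 16-bit value holding only each IRQ's edge/level bit (bit 2i+1). */
	static u16 cfg_compress_sketch(u32 val)
	{
		u16 res = 0;
		int i;

		for (i = 0; i < 16; i++)
			res |= ((val >> (2 * i + 1)) & 1) << i;

		return res;
	}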
@@ -683,9 +692,11 @@ bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
 void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	u64 elrsr = vgic_get_elrsr(vcpu);
+	unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
 	int i;
 
-	for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
+	for_each_clear_bit(i, elrsr_ptr, vgic_cpu->nr_lr) {
 		struct vgic_lr lr = vgic_get_lr(vcpu, i);
 
 		/*
@@ -706,30 +717,14 @@ void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
 		 * interrupt then move the active state to the
 		 * distributor tracking bit.
 		 */
-		if (lr.state & LR_STATE_ACTIVE) {
+		if (lr.state & LR_STATE_ACTIVE)
 			vgic_irq_set_active(vcpu, lr.irq);
-			lr.state &= ~LR_STATE_ACTIVE;
-		}
 
 		/*
 		 * Reestablish the pending state on the distributor and the
-		 * CPU interface. It may have already been pending, but that
-		 * is fine, then we are only setting a few bits that were
-		 * already set.
-		 */
-		if (lr.state & LR_STATE_PENDING) {
-			vgic_dist_irq_set_pending(vcpu, lr.irq);
-			lr.state &= ~LR_STATE_PENDING;
-		}
-
-		vgic_set_lr(vcpu, i, lr);
-
-		/*
-		 * Mark the LR as free for other use.
+		 * CPU interface and mark the LR as free for other use.
 		 */
-		BUG_ON(lr.state & LR_STATE_MASK);
-		vgic_retire_lr(i, lr.irq, vcpu);
-		vgic_irq_clear_queued(vcpu, lr.irq);
+		vgic_retire_lr(i, vcpu);
 
 		/* Finally update the VGIC state. */
 		vgic_update_state(vcpu->kvm);
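The lr_used shadow bitmap disappears here: LR occupancy is now read straight from ELRSR, where a set bit means "this list register is empty", so occupied LRs are walked with for_each_clear_bit(). u64_to_bitmask() is an existing helper in this file; it essentially reinterprets the u64 ELRSR value as a bitops-compatible bitmap, roughly as follows (sketch, modulo the exact config guards):

	static inline unsigned long *u64_to_bitmask(u64 *val)
	{
	#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
		/* swap the halves so bit 0 of the u64 stays bit 0 of the bitmap */
		*val = (*val >> 32) | (*val << 32);
	#endif
		return (unsigned long *)val;
	}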
@@ -982,6 +977,12 @@ static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
 	pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
 	pend_shared = vcpu->arch.vgic_cpu.pending_shared;
 
+	if (!dist->enabled) {
+		bitmap_zero(pend_percpu, VGIC_NR_PRIVATE_IRQS);
+		bitmap_zero(pend_shared, nr_shared);
+		return 0;
+	}
+
 	pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
 	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
 	bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);
@@ -1009,11 +1010,6 @@ void vgic_update_state(struct kvm *kvm)
 	struct kvm_vcpu *vcpu;
 	int c;
 
-	if (!dist->enabled) {
-		set_bit(0, dist->irq_pending_on_cpu);
-		return;
-	}
-
 	kvm_for_each_vcpu(c, vcpu, kvm) {
 		if (compute_pending_for_cpu(vcpu))
 			set_bit(c, dist->irq_pending_on_cpu);
@@ -1036,12 +1032,6 @@ static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
 	vgic_ops->set_lr(vcpu, lr, vlr);
 }
 
-static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
-			       struct vgic_lr vlr)
-{
-	vgic_ops->sync_lr_elrsr(vcpu, lr, vlr);
-}
-
 static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
 {
 	return vgic_ops->get_elrsr(vcpu);
@@ -1087,16 +1077,23 @@ static inline void vgic_enable(struct kvm_vcpu *vcpu)
 	vgic_ops->enable(vcpu);
 }
 
-static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
+static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu)
 {
-	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);
 
+	vgic_irq_clear_queued(vcpu, vlr.irq);
+
+	/*
+	 * We must transfer the pending state back to the distributor before
+	 * retiring the LR, otherwise we may lose edge-triggered interrupts.
+	 */
+	if (vlr.state & LR_STATE_PENDING) {
+		vgic_dist_irq_set_pending(vcpu, vlr.irq);
+		vlr.hwirq = 0;
+	}
+
 	vlr.state = 0;
 	vgic_set_lr(vcpu, lr_nr, vlr);
-	clear_bit(lr_nr, vgic_cpu->lr_used);
-	vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
-	vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
 }
 
 /*
@@ -1110,17 +1107,15 @@ static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu)
  */
 static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
 {
-	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	u64 elrsr = vgic_get_elrsr(vcpu);
+	unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
 	int lr;
 
-	for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) {
+	for_each_clear_bit(lr, elrsr_ptr, vgic->nr_lr) {
 		struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
 
-		if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
-			vgic_retire_lr(lr, vlr.irq, vcpu);
-			if (vgic_irq_is_queued(vcpu, vlr.irq))
-				vgic_irq_clear_queued(vcpu, vlr.irq);
-		}
+		if (!vgic_irq_is_enabled(vcpu, vlr.irq))
+			vgic_retire_lr(lr, vcpu);
 	}
 }
 
@@ -1132,7 +1127,8 @@ static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
 		kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state);
 		vgic_irq_clear_active(vcpu, irq);
 		vgic_update_state(vcpu->kvm);
-	} else if (vgic_dist_irq_is_pending(vcpu, irq)) {
+	} else {
+		WARN_ON(!vgic_dist_irq_is_pending(vcpu, irq));
 		vlr.state |= LR_STATE_PENDING;
 		kvm_debug("Set pending: 0x%x\n", vlr.state);
 	}
@@ -1159,7 +1155,6 @@ static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
 	}
 
 	vgic_set_lr(vcpu, lr_nr, vlr);
-	vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
 }
 
 /*
@@ -1169,8 +1164,9 @@ static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
  */
 bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
 {
-	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	u64 elrsr = vgic_get_elrsr(vcpu);
+	unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
 	struct vgic_lr vlr;
 	int lr;
 
@@ -1181,28 +1177,22 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
 
 	kvm_debug("Queue IRQ%d\n", irq);
 
-	lr = vgic_cpu->vgic_irq_lr_map[irq];
-
 	/* Do we have an active interrupt for the same CPUID? */
-	if (lr != LR_EMPTY) {
+	for_each_clear_bit(lr, elrsr_ptr, vgic->nr_lr) {
 		vlr = vgic_get_lr(vcpu, lr);
-		if (vlr.source == sgi_source_id) {
+		if (vlr.irq == irq && vlr.source == sgi_source_id) {
 			kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
-			BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
 			vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
 			return true;
 		}
 	}
 
 	/* Try to use another LR for this interrupt */
-	lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
-			       vgic->nr_lr);
+	lr = find_first_bit(elrsr_ptr, vgic->nr_lr);
 	if (lr >= vgic->nr_lr)
 		return false;
 
 	kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
-	vgic_cpu->vgic_irq_lr_map[irq] = lr;
-	set_bit(lr, vgic_cpu->lr_used);
 
 	vlr.irq = irq;
 	vlr.source = sgi_source_id;
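Both pieces of shadow state go away in this hunk: vgic_irq_lr_map answered "which LR holds this IRQ?" and lr_used answered "which LRs are busy?". Under the diff above, a single ELRSR snapshot answers both questions. Hypothetical helpers spelling out the replacement (names invented for illustration, not part of the patch):

	/* ELRSR is the single source of truth for LR occupancy:
	 * a set bit means the list register is empty. */
	static bool lr_in_use(unsigned long *elrsr_ptr, int lr)
	{
		return !test_bit(lr, elrsr_ptr);	/* was: test_bit(lr, lr_used) */
	}

	static int first_free_lr(unsigned long *elrsr_ptr, int nr_lr)
	{
		/* was: find_first_zero_bit(lr_used, nr_lr) */
		return find_first_bit(elrsr_ptr, nr_lr);
	}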
@@ -1240,7 +1230,7 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 	unsigned long *pa_percpu, *pa_shared;
-	int i, vcpu_id, lr, ret;
+	int i, vcpu_id;
 	int overflow = 0;
 	int nr_shared = vgic_nr_shared_irqs(dist);
 
|
@@ -1295,39 +1285,62 @@ epilog:
|
|
|
*/
|
|
|
clear_bit(vcpu_id, dist->irq_pending_on_cpu);
|
|
|
}
|
|
|
+}
|
|
|
|
|
|
- for (lr = 0; lr < vgic->nr_lr; lr++) {
|
|
|
- struct vgic_lr vlr;
|
|
|
+static int process_queued_irq(struct kvm_vcpu *vcpu,
|
|
|
+ int lr, struct vgic_lr vlr)
|
|
|
+{
|
|
|
+ int pending = 0;
|
|
|
|
|
|
- if (!test_bit(lr, vgic_cpu->lr_used))
|
|
|
- continue;
|
|
|
+ /*
|
|
|
+ * If the IRQ was EOIed (called from vgic_process_maintenance) or it
|
|
|
+ * went from active to non-active (called from vgic_sync_hwirq) it was
|
|
|
+ * also ACKed and we we therefore assume we can clear the soft pending
|
|
|
+ * state (should it had been set) for this interrupt.
|
|
|
+ *
|
|
|
+ * Note: if the IRQ soft pending state was set after the IRQ was
|
|
|
+ * acked, it actually shouldn't be cleared, but we have no way of
|
|
|
+ * knowing that unless we start trapping ACKs when the soft-pending
|
|
|
+ * state is set.
|
|
|
+ */
|
|
|
+ vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);
|
|
|
|
|
|
- vlr = vgic_get_lr(vcpu, lr);
|
|
|
+ /*
|
|
|
+ * Tell the gic to start sampling this interrupt again.
|
|
|
+ */
|
|
|
+ vgic_irq_clear_queued(vcpu, vlr.irq);
|
|
|
|
|
|
- /*
|
|
|
- * If we have a mapping, and the virtual interrupt is
|
|
|
- * presented to the guest (as pending or active), then we must
|
|
|
- * set the state to active in the physical world. See
|
|
|
- * Documentation/virtual/kvm/arm/vgic-mapped-irqs.txt.
|
|
|
- */
|
|
|
- if (vlr.state & LR_HW) {
|
|
|
- struct irq_phys_map *map;
|
|
|
- map = vgic_irq_map_search(vcpu, vlr.irq);
|
|
|
-
|
|
|
- ret = irq_set_irqchip_state(map->irq,
|
|
|
- IRQCHIP_STATE_ACTIVE,
|
|
|
- true);
|
|
|
- WARN_ON(ret);
|
|
|
+ /* Any additional pending interrupt? */
|
|
|
+ if (vgic_irq_is_edge(vcpu, vlr.irq)) {
|
|
|
+ BUG_ON(!(vlr.state & LR_HW));
|
|
|
+ pending = vgic_dist_irq_is_pending(vcpu, vlr.irq);
|
|
|
+ } else {
|
|
|
+ if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
|
|
|
+ vgic_cpu_irq_set(vcpu, vlr.irq);
|
|
|
+ pending = 1;
|
|
|
+ } else {
|
|
|
+ vgic_dist_irq_clear_pending(vcpu, vlr.irq);
|
|
|
+ vgic_cpu_irq_clear(vcpu, vlr.irq);
|
|
|
}
|
|
|
}
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Despite being EOIed, the LR may not have
|
|
|
+ * been marked as empty.
|
|
|
+ */
|
|
|
+ vlr.state = 0;
|
|
|
+ vlr.hwirq = 0;
|
|
|
+ vgic_set_lr(vcpu, lr, vlr);
|
|
|
+
|
|
|
+ return pending;
|
|
|
}
|
|
|
|
|
|
static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
|
|
|
{
|
|
|
u32 status = vgic_get_interrupt_status(vcpu);
|
|
|
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
|
|
|
- bool level_pending = false;
|
|
|
struct kvm *kvm = vcpu->kvm;
|
|
|
+ int level_pending = 0;
|
|
|
|
|
|
kvm_debug("STATUS = %08x\n", status);
|
|
|
|
|
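The edge/level split in process_queued_irq() is the core of the rework: an edge interrupt can only fire again from a latched pending bit, while a level interrupt follows the (possibly still asserted) line. A condensed restatement of that decision as a hypothetical standalone helper (not part of the patch):

	/* Nonzero if the IRQ should be injected again after EOI or
	 * deactivation; condensed from process_queued_irq() above. */
	static int still_pending_sketch(struct kvm_vcpu *vcpu, struct vgic_lr vlr)
	{
		if (vgic_irq_is_edge(vcpu, vlr.irq))
			/* edge: a new edge may have been latched in flight */
			return vgic_dist_irq_is_pending(vcpu, vlr.irq);

		/* level: re-sample the line; still high means inject again */
		return vgic_dist_irq_get_level(vcpu, vlr.irq);
	}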
@@ -1342,54 +1355,22 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 
 		for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
 			struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
-			WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
 
-			spin_lock(&dist->lock);
-			vgic_irq_clear_queued(vcpu, vlr.irq);
+			WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
 			WARN_ON(vlr.state & LR_STATE_MASK);
-			vlr.state = 0;
-			vgic_set_lr(vcpu, lr, vlr);
 
-			/*
-			 * If the IRQ was EOIed it was also ACKed and we we
-			 * therefore assume we can clear the soft pending
-			 * state (should it had been set) for this interrupt.
-			 *
-			 * Note: if the IRQ soft pending state was set after
-			 * the IRQ was acked, it actually shouldn't be
-			 * cleared, but we have no way of knowing that unless
-			 * we start trapping ACKs when the soft-pending state
-			 * is set.
-			 */
-			vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);
 
 			/*
 			 * kvm_notify_acked_irq calls kvm_set_irq()
-			 * to reset the IRQ level. Need to release the
-			 * lock for kvm_set_irq to grab it.
+			 * to reset the IRQ level, which grabs the dist->lock
+			 * so we call this before taking the dist->lock.
 			 */
-			spin_unlock(&dist->lock);
-
 			kvm_notify_acked_irq(kvm, 0,
					     vlr.irq - VGIC_NR_PRIVATE_IRQS);
-			spin_lock(&dist->lock);
-
-			/* Any additional pending interrupt? */
-			if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
-				vgic_cpu_irq_set(vcpu, vlr.irq);
-				level_pending = true;
-			} else {
-				vgic_dist_irq_clear_pending(vcpu, vlr.irq);
-				vgic_cpu_irq_clear(vcpu, vlr.irq);
-			}
 
+			spin_lock(&dist->lock);
+			level_pending |= process_queued_irq(vcpu, lr, vlr);
 			spin_unlock(&dist->lock);
-
-			/*
-			 * Despite being EOIed, the LR may not have
-			 * been marked as empty.
-			 */
-			vgic_sync_lr_elrsr(vcpu, lr, vlr);
 		}
 	}
 
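The rewritten comment records a real ordering constraint: since, per the comment, kvm_set_irq() ends up grabbing dist->lock itself, notifying the acked IRQ while holding that lock would deadlock. A sketch of the chain being avoided, as implied by the comment above:

	/*
	 * With dist->lock already held, this chain would self-deadlock:
	 *
	 *   kvm_notify_acked_irq()
	 *     -> kvm_set_irq()            (resets the IRQ level)
	 *          spin_lock(&dist->lock) <-- second acquisition
	 *
	 * Hence the new ordering: notify first, then take the lock,
	 * call process_queued_irq(), and unlock.
	 */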
@@ -1410,40 +1391,40 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 /*
  * Save the physical active state, and reset it to inactive.
  *
- * Return 1 if HW interrupt went from active to inactive, and 0 otherwise.
+ * Return true if there's a pending forwarded interrupt to queue.
  */
-static int vgic_sync_hwirq(struct kvm_vcpu *vcpu, struct vgic_lr vlr)
+static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)
 {
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 	struct irq_phys_map *map;
+	bool phys_active;
+	bool level_pending;
 	int ret;
 
 	if (!(vlr.state & LR_HW))
-		return 0;
+		return false;
 
 	map = vgic_irq_map_search(vcpu, vlr.irq);
-	BUG_ON(!map || !map->active);
+	BUG_ON(!map);
 
 	ret = irq_get_irqchip_state(map->irq,
				    IRQCHIP_STATE_ACTIVE,
-				    &map->active);
+				    &phys_active);
 
 	WARN_ON(ret);
 
-	if (map->active) {
-		ret = irq_set_irqchip_state(map->irq,
-					    IRQCHIP_STATE_ACTIVE,
-					    false);
-		WARN_ON(ret);
+	if (phys_active)
 		return 0;
-	}
 
-	return 1;
+	spin_lock(&dist->lock);
+	level_pending = process_queued_irq(vcpu, lr, vlr);
+	spin_unlock(&dist->lock);
+	return level_pending;
 }
 
 /* Sync back the VGIC state after a guest run */
 static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
-	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 	u64 elrsr;
 	unsigned long *elrsr_ptr;
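irq_get_irqchip_state() and irq_set_irqchip_state() are generic kernel APIs, declared in <linux/interrupt.h>, for querying and forcing state in the underlying irqchip; the hunk above uses the former to sample whether the forwarded interrupt is still active on the physical GIC. Minimal usage sketch (helper name invented):

	#include <linux/interrupt.h>

	static bool phys_irq_is_active(unsigned int phys_irq)
	{
		bool active = false;

		/* read IRQCHIP_STATE_ACTIVE from the irqchip backing phys_irq */
		WARN_ON(irq_get_irqchip_state(phys_irq, IRQCHIP_STATE_ACTIVE,
					      &active));
		return active;
	}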
@@ -1451,40 +1432,18 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 	bool level_pending;
 
 	level_pending = vgic_process_maintenance(vcpu);
-	elrsr = vgic_get_elrsr(vcpu);
-	elrsr_ptr = u64_to_bitmask(&elrsr);
 
 	/* Deal with HW interrupts, and clear mappings for empty LRs */
 	for (lr = 0; lr < vgic->nr_lr; lr++) {
-		struct vgic_lr vlr;
-
-		if (!test_bit(lr, vgic_cpu->lr_used))
-			continue;
-
-		vlr = vgic_get_lr(vcpu, lr);
-		if (vgic_sync_hwirq(vcpu, vlr)) {
-			/*
-			 * So this is a HW interrupt that the guest
-			 * EOI-ed. Clean the LR state and allow the
-			 * interrupt to be sampled again.
-			 */
-			vlr.state = 0;
-			vlr.hwirq = 0;
-			vgic_set_lr(vcpu, lr, vlr);
-			vgic_irq_clear_queued(vcpu, vlr.irq);
-			set_bit(lr, elrsr_ptr);
-		}
-
-		if (!test_bit(lr, elrsr_ptr))
-			continue;
-
-		clear_bit(lr, vgic_cpu->lr_used);
+		struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
 
+		level_pending |= vgic_sync_hwirq(vcpu, lr, vlr);
 		BUG_ON(vlr.irq >= dist->nr_irqs);
-		vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
 	}
 
 	/* Check if we still have something up our sleeve... */
+	elrsr = vgic_get_elrsr(vcpu);
+	elrsr_ptr = u64_to_bitmask(&elrsr);
 	pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
 	if (level_pending || pending < vgic->nr_lr)
 		set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
@@ -1574,6 +1533,8 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
 	int enabled;
 	bool ret = true, can_inject = true;
 
+	trace_vgic_update_irq_pending(cpuid, irq_num, level);
+
 	if (irq_num >= min(kvm->arch.vgic.nr_irqs, 1020))
 		return -EINVAL;
 
@@ -1607,8 +1568,12 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
 	} else {
 		if (level_triggered) {
 			vgic_dist_irq_clear_level(vcpu, irq_num);
-			if (!vgic_dist_irq_soft_pend(vcpu, irq_num))
+			if (!vgic_dist_irq_soft_pend(vcpu, irq_num)) {
 				vgic_dist_irq_clear_pending(vcpu, irq_num);
+				vgic_cpu_irq_clear(vcpu, irq_num);
+				if (!compute_pending_for_cpu(vcpu))
+					clear_bit(cpuid, dist->irq_pending_on_cpu);
+			}
 		}
 
 		ret = false;
@@ -1848,30 +1813,6 @@ static void vgic_free_phys_irq_map_rcu(struct rcu_head *rcu)
 	kfree(entry);
 }
 
-/**
- * kvm_vgic_get_phys_irq_active - Return the active state of a mapped IRQ
- *
- * Return the logical active state of a mapped interrupt. This doesn't
- * necessarily reflects the current HW state.
- */
-bool kvm_vgic_get_phys_irq_active(struct irq_phys_map *map)
-{
-	BUG_ON(!map);
-	return map->active;
-}
-
-/**
- * kvm_vgic_set_phys_irq_active - Set the active state of a mapped IRQ
- *
- * Set the logical active state of a mapped interrupt. This doesn't
- * immediately affects the HW state.
- */
-void kvm_vgic_set_phys_irq_active(struct irq_phys_map *map, bool active)
-{
-	BUG_ON(!map);
-	map->active = active;
-}
-
 /**
  * kvm_vgic_unmap_phys_irq - Remove a virtual to physical IRQ mapping
  * @vcpu: The VCPU pointer
@@ -1927,12 +1868,10 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
 	kfree(vgic_cpu->pending_shared);
 	kfree(vgic_cpu->active_shared);
 	kfree(vgic_cpu->pend_act_shared);
-	kfree(vgic_cpu->vgic_irq_lr_map);
 	vgic_destroy_irq_phys_map(vcpu->kvm, &vgic_cpu->irq_phys_map_list);
 	vgic_cpu->pending_shared = NULL;
 	vgic_cpu->active_shared = NULL;
 	vgic_cpu->pend_act_shared = NULL;
-	vgic_cpu->vgic_irq_lr_map = NULL;
 }
 
 static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
@@ -1943,18 +1882,14 @@ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
 	vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
 	vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
 	vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
-	vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL);
 
 	if (!vgic_cpu->pending_shared
 		|| !vgic_cpu->active_shared
-		|| !vgic_cpu->pend_act_shared
-		|| !vgic_cpu->vgic_irq_lr_map) {
+		|| !vgic_cpu->pend_act_shared) {
 		kvm_vgic_vcpu_destroy(vcpu);
 		return -ENOMEM;
 	}
 
-	memset(vgic_cpu->vgic_irq_lr_map, LR_EMPTY, nr_irqs);
-
 	/*
 	 * Store the number of LRs per vcpu, so we don't have to go
 	 * all the way to the distributor structure to find out. Only
@@ -2096,14 +2031,24 @@ int vgic_init(struct kvm *kvm)
 			break;
 		}
 
-		for (i = 0; i < dist->nr_irqs; i++) {
-			if (i < VGIC_NR_PPIS)
+		/*
+		 * Enable and configure all SGIs to be edge-triggered and
+		 * configure all PPIs as level-triggered.
+		 */
+		for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
+			if (i < VGIC_NR_SGIS) {
+				/* SGIs */
 				vgic_bitmap_set_irq_val(&dist->irq_enabled,
							vcpu->vcpu_id, i, 1);
-			if (i < VGIC_NR_PRIVATE_IRQS)
 				vgic_bitmap_set_irq_val(&dist->irq_cfg,
							vcpu->vcpu_id, i,
							VGIC_CFG_EDGE);
+			} else if (i < VGIC_NR_PRIVATE_IRQS) {
+				/* PPIs */
+				vgic_bitmap_set_irq_val(&dist->irq_cfg,
							vcpu->vcpu_id, i,
							VGIC_CFG_LEVEL);
+			}
 		}
 
 		vgic_enable(vcpu);
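For reference, the loop relies on the standard GIC private-interrupt layout: SGIs are IRQs 0-15 and PPIs are IRQs 16-31, with shared SPIs starting at 32 and left for the guest to configure. A sketch of the constants involved (values fixed by the GIC architecture; the actual macro definitions live in the vgic header):

	/* Private IRQ space as carved up by the loop above. */
	#define VGIC_NR_SGIS		16	/* IRQ  0..15: software-generated */
	#define VGIC_NR_PPIS		16	/* IRQ 16..31: private peripheral */
	#define VGIC_NR_PRIVATE_IRQS	(VGIC_NR_SGIS + VGIC_NR_PPIS)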
|