@@ -108,6 +108,7 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
 static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
 static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
 static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
+static u64 vgic_get_elrsr(struct kvm_vcpu *vcpu);
 static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
 						int virt_irq);
 static int compute_pending_for_cpu(struct kvm_vcpu *vcpu);
@@ -691,9 +692,11 @@ bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
 void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	u64 elrsr = vgic_get_elrsr(vcpu);
+	unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
 	int i;
 
-	for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
+	for_each_clear_bit(i, elrsr_ptr, vgic_cpu->nr_lr) {
 		struct vgic_lr lr = vgic_get_lr(vcpu, i);
 
 		/*
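
NOTE: the conversion above hinges on ELRSR semantics: a set bit means the
corresponding list register is empty, so the occupied LRs are exactly the
clear bits. A minimal standalone sketch of that walk (assuming a 64-bit
little-endian host; in the kernel, u64_to_bitmask() is what makes the u64
safe to use as a bitmap on 32-bit big-endian as well):

/* Sketch only -- not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define NR_LR 4

int main(void)
{
	/* A set bit in ELRSR means that list register is empty. */
	uint64_t elrsr = ~0x6ULL;	/* LR1 and LR2 occupied */
	int lr;

	/* Open-coded equivalent of for_each_clear_bit(lr, elrsr_ptr, NR_LR) */
	for (lr = 0; lr < NR_LR; lr++)
		if (!(elrsr & (1ULL << lr)))
			printf("LR%d holds an interrupt\n", lr);
	return 0;
}
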
@@ -1098,7 +1101,6 @@ static inline void vgic_enable(struct kvm_vcpu *vcpu)
 
 static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
 {
-	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);
 
 	/*
@@ -1112,8 +1114,6 @@ static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
 
 	vlr.state = 0;
 	vgic_set_lr(vcpu, lr_nr, vlr);
-	clear_bit(lr_nr, vgic_cpu->lr_used);
-	vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
 	vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
 }
 
@@ -1128,10 +1128,11 @@ static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
  */
 static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
 {
-	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	u64 elrsr = vgic_get_elrsr(vcpu);
+	unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
 	int lr;
 
-	for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) {
+	for_each_clear_bit(lr, elrsr_ptr, vgic->nr_lr) {
 		struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
 
 		if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
@@ -1188,8 +1189,9 @@ static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
  */
 bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
 {
-	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	u64 elrsr = vgic_get_elrsr(vcpu);
+	unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
 	struct vgic_lr vlr;
 	int lr;
 
@@ -1200,28 +1202,22 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
 
 	kvm_debug("Queue IRQ%d\n", irq);
 
-	lr = vgic_cpu->vgic_irq_lr_map[irq];
-
 	/* Do we have an active interrupt for the same CPUID? */
-	if (lr != LR_EMPTY) {
+	for_each_clear_bit(lr, elrsr_ptr, vgic->nr_lr) {
 		vlr = vgic_get_lr(vcpu, lr);
-		if (vlr.source == sgi_source_id) {
+		if (vlr.irq == irq && vlr.source == sgi_source_id) {
 			kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
-			BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
 			vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
 			return true;
 		}
 	}
 
 	/* Try to use another LR for this interrupt */
-	lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
-			       vgic->nr_lr);
+	lr = find_first_bit(elrsr_ptr, vgic->nr_lr);
 	if (lr >= vgic->nr_lr)
 		return false;
 
 	kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
-	vgic_cpu->vgic_irq_lr_map[irq] = lr;
-	set_bit(lr, vgic_cpu->lr_used);
 
 	vlr.irq = irq;
 	vlr.source = sgi_source_id;
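
NOTE: with vgic_irq_lr_map gone, the piggyback lookup goes from an O(1)
per-IRQ map hit to a linear scan of the occupied LRs, which is why the
condition gains an explicit vlr.irq == irq test that the map previously
made implicit. A sketch of that scan, with simplified stand-in types for
struct vgic_lr:

/* Sketch only -- simplified stand-ins, not kernel code. */
#include <stdint.h>

struct lr_entry {
	int irq;
	int source;
};

/* Return the LR already holding (irq, source), or -1 so the caller falls
 * back to claiming a free LR, i.e. the first set bit in elrsr. */
static int find_piggyback_lr(uint64_t elrsr, const struct lr_entry *lrs,
			     int nr_lr, int irq, int source)
{
	int lr;

	for (lr = 0; lr < nr_lr; lr++) {
		if (elrsr & (1ULL << lr))	/* empty LR, skip it */
			continue;
		if (lrs[lr].irq == irq && lrs[lr].source == source)
			return lr;
	}
	return -1;
}
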
@@ -1456,7 +1452,6 @@ static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)
 /* Sync back the VGIC state after a guest run */
 static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
-	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 	u64 elrsr;
 	unsigned long *elrsr_ptr;
@@ -1469,22 +1464,10 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 
 	/* Deal with HW interrupts, and clear mappings for empty LRs */
 	for (lr = 0; lr < vgic->nr_lr; lr++) {
-		struct vgic_lr vlr;
-
-		if (!test_bit(lr, vgic_cpu->lr_used))
-			continue;
-
-		vlr = vgic_get_lr(vcpu, lr);
-		if (vgic_sync_hwirq(vcpu, lr, vlr))
-			level_pending = true;
-
-		if (!test_bit(lr, elrsr_ptr))
-			continue;
-
-		clear_bit(lr, vgic_cpu->lr_used);
+		struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
 
+		level_pending |= vgic_sync_hwirq(vcpu, lr, vlr);
 		BUG_ON(vlr.irq >= dist->nr_irqs);
-		vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
 	}
 
 	/* Check if we still have something up our sleeve... */
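
NOTE: the sync loop now visits every LR unconditionally and folds each
per-LR result into level_pending with |=; an empty LR is expected to
report no pending work, so the fold is equivalent to the old if/assign.
A tiny sketch of the idiom:

/* Sketch only -- not kernel code. */
#include <stdbool.h>
#include <stdio.h>

static bool sync_one_lr(int lr)	/* stand-in for vgic_sync_hwirq() */
{
	return lr == 2;		/* pretend only LR2 left work pending */
}

int main(void)
{
	bool level_pending = false;
	int lr;

	for (lr = 0; lr < 4; lr++)
		level_pending |= sync_one_lr(lr);
	printf("level_pending = %d\n", level_pending);
	return 0;
}
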
@@ -1912,12 +1895,10 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
 	kfree(vgic_cpu->pending_shared);
 	kfree(vgic_cpu->active_shared);
 	kfree(vgic_cpu->pend_act_shared);
-	kfree(vgic_cpu->vgic_irq_lr_map);
 	vgic_destroy_irq_phys_map(vcpu->kvm, &vgic_cpu->irq_phys_map_list);
 	vgic_cpu->pending_shared = NULL;
 	vgic_cpu->active_shared = NULL;
 	vgic_cpu->pend_act_shared = NULL;
-	vgic_cpu->vgic_irq_lr_map = NULL;
 }
 
 static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
@@ -1928,18 +1909,14 @@ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
 	vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
 	vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
 	vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
-	vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL);
 
 	if (!vgic_cpu->pending_shared
 		|| !vgic_cpu->active_shared
-		|| !vgic_cpu->pend_act_shared
-		|| !vgic_cpu->vgic_irq_lr_map) {
+		|| !vgic_cpu->pend_act_shared) {
 		kvm_vgic_vcpu_destroy(vcpu);
 		return -ENOMEM;
 	}
 
-	memset(vgic_cpu->vgic_irq_lr_map, LR_EMPTY, nr_irqs);
-
 	/*
 	 * Store the number of LRs per vcpu, so we don't have to go
 	 * all the way to the distributor structure to find out. Only