@@ -663,18 +663,80 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
 	}
 }
 
-static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
-				  struct kvm_exit_mmio *mmio,
-				  phys_addr_t offset)
+/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
+static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
+					struct kvm_exit_mmio *mmio,
+					phys_addr_t offset)
 {
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	int sgi;
+	int min_sgi = (offset & ~0x3);
+	int max_sgi = min_sgi + 3;
+	int vcpu_id = vcpu->vcpu_id;
+	u32 reg = 0;
+
+	/* Copy source SGIs from distributor side */
+	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
+		int shift = 8 * (sgi - min_sgi);
+		reg |= (u32)dist->irq_sgi_sources[vcpu_id][sgi] << shift;
+	}
+
+	mmio_data_write(mmio, ~0, reg);
 	return false;
 }
 
+static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
+					 struct kvm_exit_mmio *mmio,
+					 phys_addr_t offset, bool set)
+{
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	int sgi;
+	int min_sgi = (offset & ~0x3);
+	int max_sgi = min_sgi + 3;
+	int vcpu_id = vcpu->vcpu_id;
+	u32 reg;
+	bool updated = false;
+
+	reg = mmio_data_read(mmio, ~0);
+
+	/* Set or clear the pending SGI sources on the distributor side */
+	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
+		u8 mask = reg >> (8 * (sgi - min_sgi));
+		if (set) {
+			if ((dist->irq_sgi_sources[vcpu_id][sgi] & mask) != mask)
+				updated = true;
+			dist->irq_sgi_sources[vcpu_id][sgi] |= mask;
+		} else {
+			if (dist->irq_sgi_sources[vcpu_id][sgi] & mask)
+				updated = true;
+			dist->irq_sgi_sources[vcpu_id][sgi] &= ~mask;
+		}
+	}
+
+	if (updated)
+		vgic_update_state(vcpu->kvm);
+
+	return updated;
+}
+
 static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
 				struct kvm_exit_mmio *mmio,
 				phys_addr_t offset)
 {
-	return false;
+	if (!mmio->is_write)
+		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
+	else
+		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
+}
+
+static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
+				  struct kvm_exit_mmio *mmio,
+				  phys_addr_t offset)
+{
+	if (!mmio->is_write)
+		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
+	else
+		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
 }
 
 /*
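
For reference, the byte-per-SGI layout behind these handlers can be exercised on its own. The sketch below is only an illustration under local assumptions: sgi_sources, pack_sgi_pend, apply_sgi_pend and NR_SGIS are names invented for this example, plain integers stand in for mmio_data_read()/mmio_data_write(), and none of the kernel's locking or vgic_update_state() side effects are shown. It demonstrates how each 32-bit GICD_SPENDSGIRn/GICD_CPENDSGIRn word carries one source-CPU byte per SGI, so a 4-byte-aligned offset selects SGIs offset through offset+3.

/*
 * Standalone illustration of the GICD_{C,S}PENDSGIRn byte layout used by
 * the handlers above: each 32-bit register covers four SGIs, one byte per
 * SGI, and bit i in that byte names source CPU i. All identifiers below
 * are local to this sketch, not kernel symbols.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_SGIS 16

/* Per-SGI source-CPU mask for one target VCPU (stand-in for irq_sgi_sources). */
static uint8_t sgi_sources[NR_SGIS];

/* Mirror of the read path: pack four source bytes into one register word. */
static uint32_t pack_sgi_pend(unsigned int offset)
{
	unsigned int min_sgi = offset & ~0x3u;
	uint32_t reg = 0;
	unsigned int sgi;

	for (sgi = min_sgi; sgi <= min_sgi + 3; sgi++)
		reg |= (uint32_t)sgi_sources[sgi] << (8 * (sgi - min_sgi));
	return reg;
}

/* Mirror of the write path: set (SPENDSGIR) or clear (CPENDSGIR) source bits. */
static bool apply_sgi_pend(unsigned int offset, uint32_t reg, bool set)
{
	unsigned int min_sgi = offset & ~0x3u;
	bool updated = false;
	unsigned int sgi;

	for (sgi = min_sgi; sgi <= min_sgi + 3; sgi++) {
		uint8_t mask = reg >> (8 * (sgi - min_sgi));

		if (set) {
			if ((sgi_sources[sgi] & mask) != mask)
				updated = true;
			sgi_sources[sgi] |= mask;
		} else {
			if (sgi_sources[sgi] & mask)
				updated = true;
			sgi_sources[sgi] &= ~mask;
		}
	}
	return updated;
}

int main(void)
{
	/* Pretend CPUs 0 and 2 sent SGI 5: byte 1 of the second register. */
	apply_sgi_pend(4, 0x05u << 8, true);
	printf("GICD_SPENDSGIR1 reads 0x%08" PRIx32 "\n", pack_sgi_pend(4));

	/* A write to GICD_CPENDSGIR1 drops CPU 0 as a source of SGI 5. */
	apply_sgi_pend(4, 0x01u << 8, false);
	printf("GICD_SPENDSGIR1 reads 0x%08" PRIx32 "\n", pack_sgi_pend(4));
	return 0;
}

Running the sketch prints 0x00000500 and then 0x00000400: sources 0 and 2 are recorded for SGI 5 through the second set register, and source 0 is cleared again through the corresponding clear register, which is the same record/clear cycle the handlers perform on dist->irq_sgi_sources.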