|
@@ -163,6 +163,55 @@ static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic)
|
|
return false;
|
|
return false;
|
|
}
|
|
}
|
|
|
|
|
|
|
|
+static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
|
|
|
|
+ int irq_level, bool line_status)
|
|
|
|
+{
|
|
|
|
+ union kvm_ioapic_redirect_entry entry;
|
|
|
|
+ u32 mask = 1 << irq;
|
|
|
|
+ u32 old_irr;
|
|
|
|
+ int edge, ret;
|
|
|
|
+
|
|
|
|
+ entry = ioapic->redirtbl[irq];
|
|
|
|
+ edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);
|
|
|
|
+
|
|
|
|
+ if (!irq_level) {
|
|
|
|
+ ioapic->irr &= ~mask;
|
|
|
|
+ ret = 1;
|
|
|
|
+ goto out;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ /*
|
|
|
|
+ * Return 0 for coalesced interrupts; for edge-triggered interrupts,
|
|
|
|
+ * this only happens if a previous edge has not been delivered due
|
|
|
|
+ * to masking. For level interrupts, the remote_irr field tells
|
|
|
|
+ * us if the interrupt is waiting for an EOI.
|
|
|
|
+ *
|
|
|
|
+ * RTC is special: it is edge-triggered, but userspace likes to know
|
|
|
|
+ * if it has been already ack-ed via EOI because coalesced RTC
|
|
|
|
+ * interrupts lead to time drift in Windows guests. So we track
|
|
|
|
+ * EOI manually for the RTC interrupt.
|
|
|
|
+ */
|
|
|
|
+ if (irq == RTC_GSI && line_status &&
|
|
|
|
+ rtc_irq_check_coalesced(ioapic)) {
|
|
|
|
+ ret = 0;
|
|
|
|
+ goto out;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ old_irr = ioapic->irr;
|
|
|
|
+ ioapic->irr |= mask;
|
|
|
|
+ if ((edge && old_irr == ioapic->irr) ||
|
|
|
|
+ (!edge && entry.fields.remote_irr)) {
|
|
|
|
+ ret = 0;
|
|
|
|
+ goto out;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ ret = ioapic_service(ioapic, irq, line_status);
|
|
|
|
+
|
|
|
|
+out:
|
|
|
|
+ trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
|
|
|
|
+ return ret;
|
|
|
|
+}
|
|
|
|
+
|
|
static void update_handled_vectors(struct kvm_ioapic *ioapic)
|
|
static void update_handled_vectors(struct kvm_ioapic *ioapic)
|
|
{
|
|
{
|
|
DECLARE_BITMAP(handled_vectors, 256);
|
|
DECLARE_BITMAP(handled_vectors, 256);
|
|
@@ -308,38 +357,15 @@ static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
|
|
int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
|
|
int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
|
|
int level, bool line_status)
|
|
int level, bool line_status)
|
|
{
|
|
{
|
|
- u32 old_irr;
|
|
|
|
- u32 mask = 1 << irq;
|
|
|
|
- union kvm_ioapic_redirect_entry entry;
|
|
|
|
int ret, irq_level;
|
|
int ret, irq_level;
|
|
|
|
|
|
BUG_ON(irq < 0 || irq >= IOAPIC_NUM_PINS);
|
|
BUG_ON(irq < 0 || irq >= IOAPIC_NUM_PINS);
|
|
|
|
|
|
spin_lock(&ioapic->lock);
|
|
spin_lock(&ioapic->lock);
|
|
- old_irr = ioapic->irr;
|
|
|
|
irq_level = __kvm_irq_line_state(&ioapic->irq_states[irq],
|
|
irq_level = __kvm_irq_line_state(&ioapic->irq_states[irq],
|
|
irq_source_id, level);
|
|
irq_source_id, level);
|
|
- entry = ioapic->redirtbl[irq];
|
|
|
|
- if (!irq_level) {
|
|
|
|
- ioapic->irr &= ~mask;
|
|
|
|
- ret = 1;
|
|
|
|
- } else {
|
|
|
|
- int edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);
|
|
|
|
|
|
+ ret = ioapic_set_irq(ioapic, irq, irq_level, line_status);
|
|
|
|
|
|
- if (irq == RTC_GSI && line_status &&
|
|
|
|
- rtc_irq_check_coalesced(ioapic)) {
|
|
|
|
- ret = 0; /* coalesced */
|
|
|
|
- goto out;
|
|
|
|
- }
|
|
|
|
- ioapic->irr |= mask;
|
|
|
|
- if ((edge && old_irr != ioapic->irr) ||
|
|
|
|
- (!edge && !entry.fields.remote_irr))
|
|
|
|
- ret = ioapic_service(ioapic, irq, line_status);
|
|
|
|
- else
|
|
|
|
- ret = 0; /* report coalesced interrupt */
|
|
|
|
- }
|
|
|
|
-out:
|
|
|
|
- trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
|
|
|
|
spin_unlock(&ioapic->lock);
|
|
spin_unlock(&ioapic->lock);
|
|
|
|
|
|
return ret;
|
|
return ret;
|