
KVM: Use eoi to track RTC interrupt delivery status

The current interrupt coalescing logic, which is only used by the RTC,
conflicts with posted interrupts.
This patch introduces a new mechanism that uses EOI to track interrupt
delivery: when an interrupt is delivered, pending_eoi is set to the
number of vcpus that received it, and it is decremented each time a
vcpu writes EOI. No subsequent RTC interrupt can be delivered to a vcpu
until all vcpus have written EOI.
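
As a rough illustration of the idea, the following standalone C sketch
models the dest_map/pending_eoi bookkeeping with a plain bitmask and
counter. It is not the kernel code itself: rtc_deliver is a hypothetical
stand-in for the delivery path, while rtc_irq_eoi and
rtc_irq_check_coalesced mirror the functions added by this patch in
simplified form:

#include <assert.h>
#include <stdio.h>

/* Simplified stand-in for ioapic->rtc_status in the patch: a bitmask of
 * target vcpus plus a count of EOI writes still outstanding. */
struct rtc_status {
	int pending_eoi;
	unsigned long dest_map;
};

/* Deliver an RTC interrupt: record which vcpus it was sent to and how
 * many EOIs must be seen before the next one may be delivered. */
static void rtc_deliver(struct rtc_status *rtc, unsigned long targets, int n)
{
	assert(rtc->pending_eoi == 0);
	rtc->dest_map = targets;
	rtc->pending_eoi = n;
}

/* A vcpu writes EOI: clear its bit and decrement the outstanding count. */
static void rtc_irq_eoi(struct rtc_status *rtc, int vcpu_id)
{
	if (rtc->dest_map & (1UL << vcpu_id)) {
		rtc->dest_map &= ~(1UL << vcpu_id);
		--rtc->pending_eoi;
	}
	assert(rtc->pending_eoi >= 0);
}

/* A new RTC interrupt is reported as coalesced while any EOI is pending. */
static int rtc_irq_check_coalesced(const struct rtc_status *rtc)
{
	return rtc->pending_eoi > 0;
}

int main(void)
{
	struct rtc_status rtc = { 0, 0 };

	rtc_deliver(&rtc, 0x3, 2);	/* delivered to vcpus 0 and 1 */
	printf("coalesced: %d\n", rtc_irq_check_coalesced(&rtc));	/* 1 */
	rtc_irq_eoi(&rtc, 0);
	rtc_irq_eoi(&rtc, 1);
	printf("coalesced: %d\n", rtc_irq_check_coalesced(&rtc));	/* 0 */
	return 0;
}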

Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
Reviewed-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Yang Zhang, 12 years ago
commit 2c2bf01136
1 file changed, 35 insertions(+), 1 deletion(-)

virt/kvm/ioapic.c: +35 -1

@@ -147,6 +147,22 @@ static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
 	    __rtc_irq_eoi_tracking_restore_one(vcpu);
 }

+static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu)
+{
+	if (test_and_clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map))
+		--ioapic->rtc_status.pending_eoi;
+
+	WARN_ON(ioapic->rtc_status.pending_eoi < 0);
+}
+
+static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic)
+{
+	if (ioapic->rtc_status.pending_eoi > 0)
+		return true; /* coalesced */
+
+	return false;
+}
+
 static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx,
 		bool line_status)
 {
@@ -260,6 +276,7 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq, bool line_status)
 {
 	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
 	struct kvm_lapic_irq irqe;
+	int ret;

 	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
 		     "vector=%x trig_mode=%x\n",
@@ -275,7 +292,15 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq, bool line_status)
 	irqe.level = 1;
 	irqe.shorthand = 0;

-	return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);
+	if (irq == RTC_GSI && line_status) {
+		BUG_ON(ioapic->rtc_status.pending_eoi != 0);
+		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
+				ioapic->rtc_status.dest_map);
+		ioapic->rtc_status.pending_eoi = ret;
+	} else
+		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);
+
+	return ret;
 }

 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
@@ -299,6 +324,12 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
 		ret = 1;
 	} else {
 		int edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);
+
+		if (irq == RTC_GSI && line_status &&
+			rtc_irq_check_coalesced(ioapic)) {
+			ret = 0; /* coalesced */
+			goto out;
+		}
 		ioapic->irr |= mask;
 		if ((edge && old_irr != ioapic->irr) ||
 		    (!edge && !entry.fields.remote_irr))
@@ -306,6 +337,7 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
 		else
 			ret = 0; /* report coalesced interrupt */
 	}
+out:
 	trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
 	spin_unlock(&ioapic->lock);

@@ -333,6 +365,8 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
 		if (ent->fields.vector != vector)
 			continue;

+		if (i == RTC_GSI)
+			rtc_irq_eoi(ioapic, vcpu);
 		/*
 		 * We are dropping lock while calling ack notifiers because ack
 		 * notifier callbacks for assigned devices call into IOAPIC