|
@@ -1343,6 +1343,68 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
|
|
local_irq_restore(flags);
|
|
local_irq_restore(flags);
|
|
}
|
|
}
|
|
|
|
|
|
|
|
+bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
|
|
|
|
+{
|
|
|
|
+ return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
|
|
|
|
+}
|
|
|
|
+EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
|
|
|
|
+
|
|
|
|
/*
 * Called when the hypervisor-backed timer fires: tear down the
 * hardware timer and deliver the APIC timer expiry to the guest.
 */
void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	/* The hv timer must actually be armed here ... */
	WARN_ON(!apic->lapic_timer.hv_timer_in_use);
	/* ... and the vCPU must not be blocked in its wait queue. */
	WARN_ON(swait_active(&vcpu->wq));
	/* Cancel the hardware timer before dropping the in-use flag. */
	kvm_x86_ops->cancel_hv_timer(vcpu);
	apic->lapic_timer.hv_timer_in_use = false;
	apic_timer_expired(apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
|
|
|
|
+
|
|
|
|
+void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
|
|
|
|
+{
|
|
|
|
+ struct kvm_lapic *apic = vcpu->arch.apic;
|
|
|
|
+
|
|
|
|
+ WARN_ON(apic->lapic_timer.hv_timer_in_use);
|
|
|
|
+
|
|
|
|
+ if (apic_lvtt_tscdeadline(apic) &&
|
|
|
|
+ !atomic_read(&apic->lapic_timer.pending)) {
|
|
|
|
+ u64 tscdeadline = apic->lapic_timer.tscdeadline;
|
|
|
|
+
|
|
|
|
+ if (!kvm_x86_ops->set_hv_timer(vcpu, tscdeadline)) {
|
|
|
|
+ apic->lapic_timer.hv_timer_in_use = true;
|
|
|
|
+ hrtimer_cancel(&apic->lapic_timer.timer);
|
|
|
|
+
|
|
|
|
+ /* In case the sw timer triggered in the window */
|
|
|
|
+ if (atomic_read(&apic->lapic_timer.pending)) {
|
|
|
|
+ apic->lapic_timer.hv_timer_in_use = false;
|
|
|
|
+ kvm_x86_ops->cancel_hv_timer(apic->vcpu);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ trace_kvm_hv_timer_state(vcpu->vcpu_id,
|
|
|
|
+ apic->lapic_timer.hv_timer_in_use);
|
|
|
|
+ }
|
|
|
|
+}
|
|
|
|
+EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer);
|
|
|
|
+
|
|
|
|
+void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
|
|
|
|
+{
|
|
|
|
+ struct kvm_lapic *apic = vcpu->arch.apic;
|
|
|
|
+
|
|
|
|
+ /* Possibly the TSC deadline timer is not enabled yet */
|
|
|
|
+ if (!apic->lapic_timer.hv_timer_in_use)
|
|
|
|
+ return;
|
|
|
|
+
|
|
|
|
+ kvm_x86_ops->cancel_hv_timer(vcpu);
|
|
|
|
+ apic->lapic_timer.hv_timer_in_use = false;
|
|
|
|
+
|
|
|
|
+ if (atomic_read(&apic->lapic_timer.pending))
|
|
|
|
+ return;
|
|
|
|
+
|
|
|
|
+ start_sw_tscdeadline(apic);
|
|
|
|
+}
|
|
|
|
+EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);
|
|
|
|
+
|
|
static void start_apic_timer(struct kvm_lapic *apic)
|
|
static void start_apic_timer(struct kvm_lapic *apic)
|
|
{
|
|
{
|
|
ktime_t now;
|
|
ktime_t now;
|
|
@@ -1389,7 +1451,16 @@ static void start_apic_timer(struct kvm_lapic *apic)
|
|
ktime_to_ns(ktime_add_ns(now,
|
|
ktime_to_ns(ktime_add_ns(now,
|
|
apic->lapic_timer.period)));
|
|
apic->lapic_timer.period)));
|
|
} else if (apic_lvtt_tscdeadline(apic)) {
|
|
} else if (apic_lvtt_tscdeadline(apic)) {
|
|
- start_sw_tscdeadline(apic);
|
|
|
|
|
|
+ /* lapic timer in tsc deadline mode */
|
|
|
|
+ u64 tscdeadline = apic->lapic_timer.tscdeadline;
|
|
|
|
+
|
|
|
|
+ if (kvm_x86_ops->set_hv_timer &&
|
|
|
|
+ !kvm_x86_ops->set_hv_timer(apic->vcpu, tscdeadline)) {
|
|
|
|
+ apic->lapic_timer.hv_timer_in_use = true;
|
|
|
|
+ trace_kvm_hv_timer_state(apic->vcpu->vcpu_id,
|
|
|
|
+ apic->lapic_timer.hv_timer_in_use);
|
|
|
|
+ } else
|
|
|
|
+ start_sw_tscdeadline(apic);
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
|