@@ -71,6 +71,8 @@ MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
#define SVM_FEATURE_DECODE_ASSIST (1 << 7)
#define SVM_FEATURE_PAUSE_FILTER (1 << 10)
 
+#define SVM_AVIC_DOORBELL 0xc001011b
+
#define NESTED_EXIT_HOST 0 /* Exit handled on host level */
#define NESTED_EXIT_DONE 1 /* Exit caused nested vmexit */
#define NESTED_EXIT_CONTINUE 2 /* Further checks needed */
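
[Annotation: 0xc001011b is the AMD AVIC doorbell MSR. Writing the host APIC ID of the physical core that is currently running a target vCPU makes that core re-scan the vCPU's virtual APIC state and deliver any pending interrupt without a VM exit. A minimal sketch of a doorbell write, with a hypothetical helper name (wrmsrl() is the usual asm/msr.h accessor):

	/*
	 * Sketch (hypothetical helper): kick the physical core that is
	 * currently running the target vCPU. The MSR payload is that
	 * core's *host* APIC ID, not a guest APIC ID.
	 */
	static void avic_ring_doorbell(u32 host_apic_id)
	{
		wrmsrl(SVM_AVIC_DOORBELL, host_apic_id);
	}
]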
@@ -293,6 +295,17 @@ static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
mark_dirty(svm->vmcb, VMCB_AVIC);
}
 
+static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ u64 *entry = svm->avic_physical_id_cache;
+
+ if (!entry)
+ return false;
+
+ return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
+}
+
static void recalc_intercepts(struct vcpu_svm *svm)
{
struct vmcb_control_area *c, *h;
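
[Annotation: avic_vcpu_is_running() tests the IS_RUNNING bit of this vCPU's entry in the AVIC physical APIC ID table; svm->avic_physical_id_cache points at that 64-bit entry. READ_ONCE() forces a fresh load, since another CPU flips the bit on sched-in/sched-out. For reference, a sketch of the entry layout the check assumes (mask names as in the earlier AVIC initialization patch, bit positions per the AMD manual):

	/* Sketch: 64-bit AVIC physical APIC ID table entry layout */
	#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	(0xFFULL)		/* bits 7:0   */
	#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK	(0xFFFFFFFFFFULL << 12)	/* bits 51:12 */
	#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK		(1ULL << 62)
	#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK		(1ULL << 63)
]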
@@ -2866,10 +2879,11 @@ static int clgi_interception(struct vcpu_svm *svm)
disable_gif(svm);
 
/* After a CLGI no interrupts should come */
- svm_clear_vintr(svm);
- svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
-
- mark_dirty(svm->vmcb, VMCB_INTR);
+ if (!kvm_vcpu_apicv_active(&svm->vcpu)) {
+ svm_clear_vintr(svm);
+ svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
+ mark_dirty(svm->vmcb, VMCB_INTR);
+ }
 
return 1;
}
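
[Annotation: with AVIC active the legacy V_IRQ/vintr machinery is unused, since interrupts are delivered through the virtual APIC backing page, so clgi must leave those fields alone. kvm_vcpu_apicv_active() is a one-line predicate in common x86 code; approximately:

	/*
	 * Sketch (approximate): true when an in-kernel APIC has
	 * hardware-accelerated virtualization (APICv/AVIC) enabled.
	 */
	static inline bool kvm_vcpu_apicv_active(struct kvm_vcpu *vcpu)
	{
		return vcpu->arch.apic && vcpu->arch.apicv_active;
	}
]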
@@ -3763,6 +3777,7 @@ static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
{
struct vmcb_control_area *control;
 
+ /* The following fields are ignored when AVIC is enabled */
control = &svm->vmcb->control;
control->int_vector = irq;
control->int_ctl &= ~V_INTR_PRIO_MASK;
@@ -3841,6 +3856,18 @@ static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu)
return;
}
 
+static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
+{
+ kvm_lapic_set_irr(vec, vcpu->arch.apic);
+ smp_mb__after_atomic();
+
+ if (avic_vcpu_is_running(vcpu))
+ wrmsrl(SVM_AVIC_DOORBELL,
+ __default_cpu_present_to_apicid(vcpu->cpu));
+ else
+ kvm_vcpu_wake_up(vcpu);
+}
+
static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
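
[Annotation: svm_deliver_avic_intr() first marks the vector pending in the guest's IRR, then either rings the doorbell (the target vCPU is in guest mode, so hardware injects the interrupt immediately) or wakes the vCPU so the bit is noticed on the next VMRUN. smp_mb__after_atomic() orders the IRR write against the is-running test. The common lapic code hands fixed interrupts to this hook roughly as follows (sketch; names approximate for this kernel era):

	/*
	 * Sketch: hand-off in __apic_accept_irq() when a posted-interrupt
	 * hook is wired up; otherwise fall back to a plain kick.
	 */
	if (kvm_x86_ops->deliver_posted_interrupt) {
		kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
	} else {
		kvm_lapic_set_irr(vector, apic);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
	}
]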
@@ -3895,6 +3922,9 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
 
+ if (kvm_vcpu_apicv_active(vcpu))
+ return;
+
/*
* In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
* 1, because that's a separate STGI/VMRUN intercept. The next time we
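
[Annotation: with AVIC there is no need to request an interrupt window, because hardware evaluates the virtual APIC IRR at every VMRUN and on doorbell writes; the early return skips the vintr setup in the rest of this function. For contrast, the non-AVIC tail of the function looks approximately like:

	/*
	 * Sketch (approximate non-AVIC path): force an open interrupt
	 * window by injecting a dummy virtual interrupt and intercepting
	 * V_IRQ.
	 */
	if (gif_set(svm) && nested_svm_intr(svm)) {
		svm_set_vintr(svm);
		svm_inject_irq(svm, 0x0);
	}
]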
@@ -4638,6 +4668,7 @@ static struct kvm_x86_ops svm_x86_ops = {
.sched_in = svm_sched_in,
 
.pmu_ops = &amd_pmu_ops,
+ .deliver_posted_interrupt = svm_deliver_avic_intr,
};
 
static int __init svm_init(void)