@@ -693,6 +693,24 @@ void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
 		stimer_cleanup(&hv_vcpu->stimer[i]);
 }
 
+bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
+{
+	if (!(vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
+		return false;
+	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
+}
+EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);
+
+bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
+			    struct hv_vp_assist_page *assist_page)
+{
+	if (!kvm_hv_assist_page_enabled(vcpu))
+		return false;
+	return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
+				      assist_page, sizeof(*assist_page));
+}
+EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);
+
 static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
 {
 	struct hv_message *msg = &stimer->msg;
@@ -1078,7 +1096,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 
 		if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
 			hv_vcpu->hv_vapic = data;
-			if (kvm_lapic_enable_pv_eoi(vcpu, 0))
+			if (kvm_lapic_enable_pv_eoi(vcpu, 0, 0))
 				return 1;
 			break;
 		}
@@ -1091,7 +1109,8 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 		hv_vcpu->hv_vapic = data;
 		kvm_vcpu_mark_page_dirty(vcpu, gfn);
 		if (kvm_lapic_enable_pv_eoi(vcpu,
-					    gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
+					    gfn_to_gpa(gfn) | KVM_MSR_ENABLED,
+					    sizeof(struct hv_vp_assist_page)))
 			return 1;
 		break;
 	}
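
For context, a minimal caller sketch (not part of the patch above, and assuming it sits in a KVM source file with the tree's Hyper-V headers) showing how the two newly exported helpers fit together: kvm_hv_assist_page_enabled() requires both the HV_X64_MSR_VP_ASSIST_PAGE_ENABLE bit in hv_vapic and the KVM_MSR_ENABLED bit of the reused pv_eoi cache, and kvm_hv_get_assist_page() copies the guest's VP assist page through that cache, returning false on any failure. The function name below is hypothetical.

/*
 * Hypothetical example only (not from the patch): take a snapshot of the
 * guest's VP assist page using the new helpers.
 */
static bool example_read_vp_assist(struct kvm_vcpu *vcpu)
{
	struct hv_vp_assist_page assist_page;

	/* Fails if the page is not enabled or the cached guest read fails. */
	if (!kvm_hv_get_assist_page(vcpu, &assist_page))
		return false;

	/* assist_page now holds a copy of the guest's VP assist page. */
	return true;
}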