|
@@ -1042,20 +1042,20 @@ static u64 current_task_runtime_100ns(void)
 
 static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 {
-	struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
+	struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
 
 	switch (msr) {
 	case HV_X64_MSR_VP_INDEX:
 		if (!host || (u32)data >= KVM_MAX_VCPUS)
 			return 1;
-		hv->vp_index = (u32)data;
+		hv_vcpu->vp_index = (u32)data;
 		break;
 	case HV_X64_MSR_VP_ASSIST_PAGE: {
 		u64 gfn;
 		unsigned long addr;
 
 		if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
-			hv->hv_vapic = data;
+			hv_vcpu->hv_vapic = data;
 			if (kvm_lapic_enable_pv_eoi(vcpu, 0))
 				return 1;
 			break;
|
|
@@ -1066,7 +1066,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 			return 1;
 		if (__clear_user((void __user *)addr, PAGE_SIZE))
 			return 1;
-		hv->hv_vapic = data;
+		hv_vcpu->hv_vapic = data;
 		kvm_vcpu_mark_page_dirty(vcpu, gfn);
 		if (kvm_lapic_enable_pv_eoi(vcpu,
 					    gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
|
|
@@ -1082,7 +1082,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 	case HV_X64_MSR_VP_RUNTIME:
 		if (!host)
 			return 1;
-		hv->runtime_offset = data - current_task_runtime_100ns();
+		hv_vcpu->runtime_offset = data - current_task_runtime_100ns();
 		break;
 	case HV_X64_MSR_SCONTROL:
 	case HV_X64_MSR_SVERSION:
|
|
@@ -1174,11 +1174,11 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
 			     bool host)
 {
 	u64 data = 0;
-	struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
+	struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
 
 	switch (msr) {
 	case HV_X64_MSR_VP_INDEX:
-		data = hv->vp_index;
+		data = hv_vcpu->vp_index;
 		break;
 	case HV_X64_MSR_EOI:
 		return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
|
|
@@ -1187,10 +1187,10 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
 	case HV_X64_MSR_TPR:
 		return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
 	case HV_X64_MSR_VP_ASSIST_PAGE:
-		data = hv->hv_vapic;
+		data = hv_vcpu->hv_vapic;
 		break;
 	case HV_X64_MSR_VP_RUNTIME:
-		data = current_task_runtime_100ns() + hv->runtime_offset;
+		data = current_task_runtime_100ns() + hv_vcpu->runtime_offset;
 		break;
 	case HV_X64_MSR_SCONTROL:
 	case HV_X64_MSR_SVERSION:
|