@@ -235,7 +235,7 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
 	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
 	int ret;
 
-	if (!synic->active)
+	if (!synic->active && !host)
 		return 1;
 
 	trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);
@@ -295,11 +295,12 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
 	return ret;
 }
 
-static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata)
+static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
+			 bool host)
 {
 	int ret;
 
-	if (!synic->active)
+	if (!synic->active && !host)
 		return 1;
 
 	ret = 0;
@@ -1014,6 +1015,11 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
 	case HV_X64_MSR_TSC_EMULATION_STATUS:
 		hv->hv_tsc_emulation_status = data;
 		break;
+	case HV_X64_MSR_TIME_REF_COUNT:
+		/* read-only, but still ignore it if host-initiated */
+		if (!host)
+			return 1;
+		break;
 	default:
 		vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n",
 			    msr, data);
@@ -1101,6 +1107,12 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 		return stimer_set_count(vcpu_to_stimer(vcpu, timer_index),
 					data, host);
 	}
+	case HV_X64_MSR_TSC_FREQUENCY:
+	case HV_X64_MSR_APIC_FREQUENCY:
+		/* read-only, but still ignore it if host-initiated */
+		if (!host)
+			return 1;
+		break;
 	default:
 		vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n",
 			    msr, data);
@@ -1156,7 +1168,8 @@ static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	return 0;
 }
 
-static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
+			  bool host)
 {
 	u64 data = 0;
 	struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
@@ -1183,7 +1196,7 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	case HV_X64_MSR_SIMP:
 	case HV_X64_MSR_EOM:
 	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
-		return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata);
+		return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata, host);
 	case HV_X64_MSR_STIMER0_CONFIG:
 	case HV_X64_MSR_STIMER1_CONFIG:
 	case HV_X64_MSR_STIMER2_CONFIG:
@@ -1229,7 +1242,7 @@ int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 	return kvm_hv_set_msr(vcpu, msr, data, host);
 }
 
-int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
 {
 	if (kvm_hv_msr_partition_wide(msr)) {
 		int r;
@@ -1239,7 +1252,7 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 		mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
 		return r;
 	} else
-		return kvm_hv_get_msr(vcpu, msr, pdata);
+		return kvm_hv_get_msr(vcpu, msr, pdata, host);
 }
 
 static __always_inline int get_sparse_bank_no(u64 valid_bank_mask, int bank_no)