@@ -310,8 +310,8 @@ int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		(MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
 	u64 new_state = msr_info->data &
 		(MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
-	u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) |
-		0x2ff | (guest_cpuid_has_x2apic(vcpu) ? 0 : X2APIC_ENABLE);
+	u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | 0x2ff |
+		(guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);
 
 	if (!msr_info->host_initiated &&
 	    ((msr_info->data & reserved_bits) != 0 ||
@@ -754,19 +754,19 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	if (cr4 & CR4_RESERVED_BITS)
 		return 1;
 
-	if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE))
 		return 1;
 
-	if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP))
 		return 1;
 
-	if (!guest_cpuid_has_smap(vcpu) && (cr4 & X86_CR4_SMAP))
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP))
 		return 1;
 
-	if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE))
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE))
 		return 1;
 
-	if (!guest_cpuid_has_pku(vcpu) && (cr4 & X86_CR4_PKE))
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE))
 		return 1;
 
 	if (is_long_mode(vcpu)) {
@@ -779,7 +779,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 		return 1;
 
 	if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
-		if (!guest_cpuid_has_pcid(vcpu))
+		if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID))
 			return 1;
 
 		/* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
@@ -883,7 +883,7 @@ static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
 {
 	u64 fixed = DR6_FIXED_1;
 
-	if (!guest_cpuid_has_rtm(vcpu))
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM))
 		fixed |= DR6_RTM;
 	return fixed;
 }
@@ -1534,8 +1534,9 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
 	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
 
-	if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated)
+	if (!msr->host_initiated && guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST))
 		update_ia32_tsc_adjust_msr(vcpu, offset);
+
 	kvm_vcpu_write_tsc_offset(vcpu, offset);
 	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
@@ -2185,7 +2186,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		kvm_set_lapic_tscdeadline_msr(vcpu, data);
 		break;
 	case MSR_IA32_TSC_ADJUST:
-		if (guest_cpuid_has_tsc_adjust(vcpu)) {
+		if (guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST)) {
 			if (!msr_info->host_initiated) {
 				s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
 				adjust_tsc_offset_guest(vcpu, adj);
@@ -2307,12 +2308,12 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", msr, data);
 		break;
 	case MSR_AMD64_OSVW_ID_LENGTH:
-		if (!guest_cpuid_has_osvw(vcpu))
+		if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
 			return 1;
 		vcpu->arch.osvw.length = data;
 		break;
 	case MSR_AMD64_OSVW_STATUS:
-		if (!guest_cpuid_has_osvw(vcpu))
+		if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
 			return 1;
 		vcpu->arch.osvw.status = data;
 		break;
@@ -2537,12 +2538,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		msr_info->data = 0xbe702111;
 		break;
 	case MSR_AMD64_OSVW_ID_LENGTH:
-		if (!guest_cpuid_has_osvw(vcpu))
+		if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
 			return 1;
 		msr_info->data = vcpu->arch.osvw.length;
 		break;
 	case MSR_AMD64_OSVW_STATUS:
-		if (!guest_cpuid_has_osvw(vcpu))
+		if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
 			return 1;
 		msr_info->data = vcpu->arch.osvw.status;
 		break;
@@ -6606,7 +6607,7 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 	trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
 	vcpu->arch.hflags |= HF_SMM_MASK;
 	memset(buf, 0, 512);
-	if (guest_cpuid_has_longmode(vcpu))
+	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
 		enter_smm_save_state_64(vcpu, buf);
 	else
 		enter_smm_save_state_32(vcpu, buf);
@@ -6658,7 +6659,7 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 	kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
 	kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
 
-	if (guest_cpuid_has_longmode(vcpu))
+	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
 		kvm_x86_ops->set_efer(vcpu, 0);
 
 	kvm_update_cpuid(vcpu);
@@ -7424,7 +7425,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	int pending_vec, max_bits, idx;
 	struct desc_ptr dt;
 
-	if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
+			(sregs->cr4 & X86_CR4_OSXSAVE))
 		return -EINVAL;
 
 	dt.size = sregs->idt.limit;