@@ -3,6 +3,7 @@
 
 #include "x86.h"
 #include <asm/cpu.h>
+#include <asm/processor.h>
 
 int kvm_update_cpuid(struct kvm_vcpu *vcpu);
 bool kvm_mpx_supported(void);
@@ -29,95 +30,78 @@ static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
 	return vcpu->arch.maxphyaddr;
 }
 
-static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
+struct cpuid_reg {
+	u32 function;
+	u32 index;
+	int reg;
+};
 
-	if (!static_cpu_has(X86_FEATURE_XSAVE))
-		return false;
-
-	best = kvm_find_cpuid_entry(vcpu, 1, 0);
-	return best && (best->ecx & bit(X86_FEATURE_XSAVE));
-}
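+/*
+ * Reverse-map each KVM feature word (X86_FEATURE_* bit number / 32) back to
+ * the CPUID function, index and output register that defines it.
+ */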
+static const struct cpuid_reg reverse_cpuid[] = {
+	[CPUID_1_EDX]         = {         1, 0, CPUID_EDX},
+	[CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX},
+	[CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
+	[CPUID_1_ECX]         = {         1, 0, CPUID_ECX},
+	[CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
+	[CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
+	[CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
+	[CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
+	[CPUID_F_0_EDX]       = {       0xf, 0, CPUID_EDX},
+	[CPUID_F_1_EDX]       = {       0xf, 1, CPUID_EDX},
+	[CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
+	[CPUID_6_EAX]         = {         6, 0, CPUID_EAX},
+	[CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
+	[CPUID_7_ECX]         = {         7, 0, CPUID_ECX},
+	[CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
+};
 
-static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu)
+static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature)
 {
-	struct kvm_cpuid_entry2 *best;
+	unsigned x86_leaf = x86_feature / 32;
 
-	best = kvm_find_cpuid_entry(vcpu, 1, 0);
-	return best && (best->edx & bit(X86_FEATURE_MTRR));
-}
+	BUILD_BUG_ON(!__builtin_constant_p(x86_leaf));
+	BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
+	BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
 
-static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 7, 0);
-	return best && (best->ebx & bit(X86_FEATURE_TSC_ADJUST));
-}
-
-static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 7, 0);
-	return best && (best->ebx & bit(X86_FEATURE_SMEP));
+	return reverse_cpuid[x86_leaf];
 }
 
-static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu)
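+/* Locate the guest CPUID register that holds @x86_feature; NULL if absent. */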
+static __always_inline int *guest_cpuid_get_register(struct kvm_vcpu *vcpu, unsigned x86_feature)
 {
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 7, 0);
-	return best && (best->ebx & bit(X86_FEATURE_SMAP));
-}
-
-static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 7, 0);
-	return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
-}
-
-static inline bool guest_cpuid_has_pku(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 7, 0);
-	return best && (best->ecx & bit(X86_FEATURE_PKU));
-}
-
-static inline bool guest_cpuid_has_longmode(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
-	return best && (best->edx & bit(X86_FEATURE_LM));
-}
+	struct kvm_cpuid_entry2 *entry;
+	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
 
-static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
+	entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
+	if (!entry)
+		return NULL;
 
-	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
-	return best && (best->ecx & bit(X86_FEATURE_OSVW));
+	switch (cpuid.reg) {
+	case CPUID_EAX:
+		return &entry->eax;
+	case CPUID_EBX:
+		return &entry->ebx;
+	case CPUID_ECX:
+		return &entry->ecx;
+	case CPUID_EDX:
+		return &entry->edx;
+	default:
+		BUILD_BUG();
+		return NULL;
+	}
 }
 
-static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu)
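+/*
+ * Check whether the guest's CPUID advertises @x86_feature.  XSAVE is also
+ * gated on host support, since KVM cannot expose XSAVE without it.
+ */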
+static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned x86_feature)
 {
-	struct kvm_cpuid_entry2 *best;
+	int *reg;
 
-	best = kvm_find_cpuid_entry(vcpu, 1, 0);
-	return best && (best->ecx & bit(X86_FEATURE_PCID));
-}
+	if (x86_feature == X86_FEATURE_XSAVE &&
+	    !static_cpu_has(X86_FEATURE_XSAVE))
+		return false;
 
-static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
+	reg = guest_cpuid_get_register(vcpu, x86_feature);
+	if (!reg)
+		return false;
 
-	best = kvm_find_cpuid_entry(vcpu, 1, 0);
-	return best && (best->ecx & bit(X86_FEATURE_X2APIC));
+	return *reg & bit(x86_feature);
 }
 
 static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
@@ -128,46 +112,6 @@ static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
 	return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
 }
 
-static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
-	return best && (best->edx & bit(X86_FEATURE_GBPAGES));
-}
-
-static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 7, 0);
-	return best && (best->ebx & bit(X86_FEATURE_RTM));
-}
-
-static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 7, 0);
-	return best && (best->ebx & bit(X86_FEATURE_MPX));
-}
-
-static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
-	return best && (best->edx & bit(X86_FEATURE_RDTSCP));
-}
-
-static inline bool guest_cpuid_has_nrips(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 0x8000000a, 0);
-	return best && (best->edx & bit(X86_FEATURE_NRIPS));
-}
-
 static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;