@@ -1103,11 +1103,35 @@ static u32 msr_based_features[] = {
 
 static unsigned int num_msr_based_features;
 
+u64 kvm_get_arch_capabilities(void)
+{
+	u64 data;
+
+	rdmsrl_safe(MSR_IA32_ARCH_CAPABILITIES, &data);
+
+	/*
+	 * If we're doing cache flushes (either "always" or "cond")
+	 * we will do one whenever the guest does a vmlaunch/vmresume.
+	 * If an outer hypervisor is doing the cache flush for us
+	 * (VMENTER_L1D_FLUSH_NESTED_VM), we can safely pass that
+	 * capability to the guest too, and if EPT is disabled we're not
+	 * vulnerable.  Overall, only VMENTER_L1D_FLUSH_NEVER will
+	 * require a nested hypervisor to do a flush of its own.
+	 */
+	if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER)
+		data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH;
+
+	return data;
+}
+EXPORT_SYMBOL_GPL(kvm_get_arch_capabilities);
+
 static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
 {
 	switch (msr->index) {
-	case MSR_IA32_UCODE_REV:
 	case MSR_IA32_ARCH_CAPABILITIES:
+		msr->data = kvm_get_arch_capabilities();
+		break;
+	case MSR_IA32_UCODE_REV:
 		rdmsrl_safe(msr->index, &msr->data);
 		break;
 	default: