|
@@ -2571,6 +2571,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
#ifdef CONFIG_X86_64
|
|
|
int cpu = raw_smp_processor_id();
|
|
|
+ unsigned long fs_base, kernel_gs_base;
|
|
|
#endif
|
|
|
int i;
|
|
|
|
|
@@ -2586,12 +2587,20 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
|
|
|
vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
|
|
|
|
|
|
#ifdef CONFIG_X86_64
|
|
|
- save_fsgs_for_kvm();
|
|
|
- vmx->host_state.fs_sel = current->thread.fsindex;
|
|
|
- vmx->host_state.gs_sel = current->thread.gsindex;
|
|
|
-#else
|
|
|
- savesegment(fs, vmx->host_state.fs_sel);
|
|
|
- savesegment(gs, vmx->host_state.gs_sel);
|
|
|
+ if (likely(is_64bit_mm(current->mm))) {
|
|
|
+ save_fsgs_for_kvm();
|
|
|
+ vmx->host_state.fs_sel = current->thread.fsindex;
|
|
|
+ vmx->host_state.gs_sel = current->thread.gsindex;
|
|
|
+ fs_base = current->thread.fsbase;
|
|
|
+ kernel_gs_base = current->thread.gsbase;
|
|
|
+ } else {
|
|
|
+#endif
|
|
|
+ savesegment(fs, vmx->host_state.fs_sel);
|
|
|
+ savesegment(gs, vmx->host_state.gs_sel);
|
|
|
+#ifdef CONFIG_X86_64
|
|
|
+ fs_base = read_msr(MSR_FS_BASE);
|
|
|
+ kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
|
|
|
+ }
|
|
|
#endif
|
|
|
if (!(vmx->host_state.fs_sel & 7)) {
|
|
|
vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
|
|
@@ -2611,10 +2620,10 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
|
|
|
savesegment(ds, vmx->host_state.ds_sel);
|
|
|
savesegment(es, vmx->host_state.es_sel);
|
|
|
|
|
|
- vmcs_writel(HOST_FS_BASE, current->thread.fsbase);
|
|
|
+ vmcs_writel(HOST_FS_BASE, fs_base);
|
|
|
vmcs_writel(HOST_GS_BASE, cpu_kernelmode_gs_base(cpu));
|
|
|
|
|
|
- vmx->msr_host_kernel_gs_base = current->thread.gsbase;
|
|
|
+ vmx->msr_host_kernel_gs_base = kernel_gs_base;
|
|
|
if (is_long_mode(&vmx->vcpu))
|
|
|
wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
|
|
|
#else
|
|
@@ -4322,11 +4331,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
|
|
|
vmcs_conf->order = get_order(vmcs_conf->size);
|
|
|
vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff;
|
|
|
|
|
|
- /* KVM supports Enlightened VMCS v1 only */
|
|
|
- if (static_branch_unlikely(&enable_evmcs))
|
|
|
- vmcs_conf->revision_id = KVM_EVMCS_VERSION;
|
|
|
- else
|
|
|
- vmcs_conf->revision_id = vmx_msr_low;
|
|
|
+ vmcs_conf->revision_id = vmx_msr_low;
|
|
|
|
|
|
vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
|
|
|
vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
|
|
@@ -4396,7 +4401,13 @@ static struct vmcs *alloc_vmcs_cpu(int cpu)
|
|
|
return NULL;
|
|
|
vmcs = page_address(pages);
|
|
|
memset(vmcs, 0, vmcs_config.size);
|
|
|
- vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
|
|
|
+
|
|
|
+ /* KVM supports Enlightened VMCS v1 only */
|
|
|
+ if (static_branch_unlikely(&enable_evmcs))
|
|
|
+ vmcs->revision_id = KVM_EVMCS_VERSION;
|
|
|
+ else
|
|
|
+ vmcs->revision_id = vmcs_config.revision_id;
|
|
|
+
|
|
|
return vmcs;
|
|
|
}
|
|
|
|
|
@@ -4564,6 +4575,19 @@ static __init int alloc_kvm_area(void)
|
|
|
return -ENOMEM;
|
|
|
}
|
|
|
|
|
|
+ /*
|
|
|
+ * When eVMCS is enabled, alloc_vmcs_cpu() sets
|
|
|
+ * vmcs->revision_id to KVM_EVMCS_VERSION instead of
|
|
|
+ * revision_id reported by MSR_IA32_VMX_BASIC.
|
|
|
+ *
|
|
|
+ * However, even though not explicitly documented by
|
|
|
+ * TLFS, VMXArea passed as VMXON argument should
|
|
|
+ * still be marked with revision_id reported by
|
|
|
+ * physical CPU.
|
|
|
+ */
|
|
|
+ if (static_branch_unlikely(&enable_evmcs))
|
|
|
+ vmcs->revision_id = vmcs_config.revision_id;
|
|
|
+
|
|
|
per_cpu(vmxarea, cpu) = vmcs;
|
|
|
}
|
|
|
return 0;
|
|
@@ -11753,7 +11777,6 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
|
|
|
{
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
|
|
|
- u32 msr_entry_idx;
|
|
|
u32 exit_qual;
|
|
|
int r;
|
|
|
|
|
@@ -11775,10 +11798,10 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
|
|
|
nested_get_vmcs12_pages(vcpu, vmcs12);
|
|
|
|
|
|
r = EXIT_REASON_MSR_LOAD_FAIL;
|
|
|
- msr_entry_idx = nested_vmx_load_msr(vcpu,
|
|
|
- vmcs12->vm_entry_msr_load_addr,
|
|
|
- vmcs12->vm_entry_msr_load_count);
|
|
|
- if (msr_entry_idx)
|
|
|
+ exit_qual = nested_vmx_load_msr(vcpu,
|
|
|
+ vmcs12->vm_entry_msr_load_addr,
|
|
|
+ vmcs12->vm_entry_msr_load_count);
|
|
|
+ if (exit_qual)
|
|
|
goto fail;
|
|
|
|
|
|
/*
|