@@ -472,6 +472,7 @@ struct vcpu_vmx {
 		int           gs_ldt_reload_needed;
 		int           fs_reload_needed;
 		u64           msr_host_bndcfgs;
+		unsigned long vmcs_host_cr4;	/* May not match real cr4 */
 	} host_state;
 	struct {
 		int vm86_active;
@@ -4267,11 +4268,16 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
 	u32 low32, high32;
 	unsigned long tmpl;
 	struct desc_ptr dt;
+	unsigned long cr4;
 
 	vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS);  /* 22.2.3 */
-	vmcs_writel(HOST_CR4, read_cr4());  /* 22.2.3, 22.2.5 */
 	vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
 
+	/* Save the most likely value for this task's CR4 in the VMCS. */
+	cr4 = read_cr4();
+	vmcs_writel(HOST_CR4, cr4);			/* 22.2.3, 22.2.5 */
+	vmx->host_state.vmcs_host_cr4 = cr4;
+
 	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
 #ifdef CONFIG_X86_64
 	/*
@@ -7514,7 +7520,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
 static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	unsigned long debugctlmsr;
+	unsigned long debugctlmsr, cr4;
 
 	/* Record the guest's net vcpu time for enforced NMI injections. */
 	if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
@@ -7540,6 +7546,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
 		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
 
+	cr4 = read_cr4();
+	if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
+		vmcs_writel(HOST_CR4, cr4);
+		vmx->host_state.vmcs_host_cr4 = cr4;
+	}
+
 	/* When single-stepping over STI and MOV SS, we must clear the
 	 * corresponding interruptibility bits in the guest state. Otherwise
 	 * vmentry fails as it then expects bit 14 (BS) in pending debug