@@ -48,6 +48,7 @@
 #include <asm/kexec.h>
 #include <asm/apic.h>
 #include <asm/irq_remapping.h>
+#include <asm/mmu_context.h>
 
 #include "trace.h"
 #include "pmu.h"
@@ -596,6 +597,7 @@ struct vcpu_vmx {
 		int           gs_ldt_reload_needed;
 		int           fs_reload_needed;
 		u64           msr_host_bndcfgs;
+		unsigned long vmcs_host_cr3;	/* May not match real cr3 */
 		unsigned long vmcs_host_cr4;	/* May not match real cr4 */
 	} host_state;
 	struct {
@@ -5012,12 +5014,19 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
 	u32 low32, high32;
 	unsigned long tmpl;
 	struct desc_ptr dt;
-	unsigned long cr0, cr4;
+	unsigned long cr0, cr3, cr4;
 
 	cr0 = read_cr0();
 	WARN_ON(cr0 & X86_CR0_TS);
 	vmcs_writel(HOST_CR0, cr0);  /* 22.2.3 */
-	vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
+
+	/*
+	 * Save the most likely value for this task's CR3 in the VMCS.
+	 * We can't use __get_current_cr3_fast() because we're not atomic.
+	 */
+	cr3 = read_cr3();
+	vmcs_writel(HOST_CR3, cr3);  /* 22.2.3  FIXME: shadow tables */
+	vmx->host_state.vmcs_host_cr3 = cr3;
 
 	/* Save the most likely value for this task's CR4 in the VMCS. */
 	cr4 = cr4_read_shadow();
@@ -8820,7 +8829,7 @@ static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
 static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	unsigned long debugctlmsr, cr4;
+	unsigned long debugctlmsr, cr3, cr4;
 
 	/* Don't enter VMX if guest state is invalid, let the exit handler
 	   start emulation until we arrive back to a valid state */
@@ -8842,6 +8851,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
 		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
 
+	cr3 = __get_current_cr3_fast();
+	if (unlikely(cr3 != vmx->host_state.vmcs_host_cr3)) {
+		vmcs_writel(HOST_CR3, cr3);
+		vmx->host_state.vmcs_host_cr3 = cr3;
+	}
+
 	cr4 = cr4_read_shadow();
 	if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
 		vmcs_writel(HOST_CR4, cr4);
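
The last two hunks apply to HOST_CR3 the same write-avoidance pattern the code already uses for HOST_CR4: keep a cached copy (host_state.vmcs_host_cr3) next to the VMCS and rewrite the field on vmentry only when the current value actually differs. The standalone sketch below is illustrative only, assuming nothing beyond the patch itself; sync_host_cr3(), stub_read_cr3() and stub_vmcs_write_cr3() are made-up stand-ins for the kernel's read_cr3()/__get_current_cr3_fast() and vmcs_writel(), not real APIs.

/*
 * Illustrative sketch of the caching pattern above: skip the expensive
 * "VMCS" write whenever the value has not changed since the last entry.
 * The stub functions below are hypothetical placeholders, not kernel code.
 */
#include <stdio.h>

static unsigned long vmcs_host_cr3;          /* cached copy, like host_state.vmcs_host_cr3 */
static unsigned long fake_hw_cr3 = 0x1000;   /* pretend value of the current CR3 */
static unsigned int  vmcs_writes;            /* counts the expensive writes */

static unsigned long stub_read_cr3(void)
{
	return fake_hw_cr3;                  /* stands in for __get_current_cr3_fast() */
}

static void stub_vmcs_write_cr3(unsigned long v)
{
	(void)v;
	vmcs_writes++;                       /* stands in for vmcs_writel(HOST_CR3, v) */
}

/* Per-"vmentry" fast path: only touch the VMCS field when CR3 changed. */
static void sync_host_cr3(void)
{
	unsigned long cr3 = stub_read_cr3();

	if (cr3 != vmcs_host_cr3) {
		stub_vmcs_write_cr3(cr3);
		vmcs_host_cr3 = cr3;
	}
}

int main(void)
{
	sync_host_cr3();                     /* first entry: write happens */
	sync_host_cr3();                     /* unchanged: write skipped */
	fake_hw_cr3 = 0x2000;                /* host switched address spaces */
	sync_host_cr3();                     /* changed: write happens again */
	printf("vmcs writes: %u\n", vmcs_writes);   /* prints 2 */
	return 0;
}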