@@ -32,26 +32,27 @@
#include <asm/desc.h>
#include <asm/vmx.h>
#include <asm/virtext.h>
+#include <asm/mce.h>
#define __ex(x) __kvm_handle_fault_on_reboot(x)
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
-static int bypass_guest_pf = 1;
-module_param(bypass_guest_pf, bool, 0);
+static int __read_mostly bypass_guest_pf = 1;
+module_param(bypass_guest_pf, bool, S_IRUGO);
-static int enable_vpid = 1;
-module_param(enable_vpid, bool, 0);
+static int __read_mostly enable_vpid = 1;
+module_param_named(vpid, enable_vpid, bool, 0444);
-static int flexpriority_enabled = 1;
-module_param(flexpriority_enabled, bool, 0);
+static int __read_mostly flexpriority_enabled = 1;
+module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
-static int enable_ept = 1;
-module_param(enable_ept, bool, 0);
+static int __read_mostly enable_ept = 1;
+module_param_named(ept, enable_ept, bool, S_IRUGO);
-static int emulate_invalid_guest_state = 0;
-module_param(emulate_invalid_guest_state, bool, 0);
+static int __read_mostly emulate_invalid_guest_state = 0;
+module_param(emulate_invalid_guest_state, bool, S_IRUGO);
struct vmcs {
u32 revision_id;
@@ -97,6 +98,7 @@ struct vcpu_vmx {
int soft_vnmi_blocked;
ktime_t entry_time;
s64 vnmi_blocked_time;
+ u32 exit_reason;
};
static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
@@ -111,9 +113,10 @@ static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
static DEFINE_PER_CPU(struct list_head, vcpus_on_cpu);
-static struct page *vmx_io_bitmap_a;
-static struct page *vmx_io_bitmap_b;
-static struct page *vmx_msr_bitmap;
+static unsigned long *vmx_io_bitmap_a;
+static unsigned long *vmx_io_bitmap_b;
+static unsigned long *vmx_msr_bitmap_legacy;
+static unsigned long *vmx_msr_bitmap_longmode;
static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
static DEFINE_SPINLOCK(vmx_vpid_lock);
@@ -213,70 +216,78 @@ static inline int is_external_interrupt(u32 intr_info)
== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}
+static inline int is_machine_check(u32 intr_info)
+{
+ return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
+ INTR_INFO_VALID_MASK)) ==
+ (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
+}
+
static inline int cpu_has_vmx_msr_bitmap(void)
{
- return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS);
+ return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
}
static inline int cpu_has_vmx_tpr_shadow(void)
{
- return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW);
+ return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
}
static inline int vm_need_tpr_shadow(struct kvm *kvm)
{
- return ((cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm)));
+ return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm));
}
static inline int cpu_has_secondary_exec_ctrls(void)
{
- return (vmcs_config.cpu_based_exec_ctrl &
- CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
+ return vmcs_config.cpu_based_exec_ctrl &
+ CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
}
static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
{
- return flexpriority_enabled
- && (vmcs_config.cpu_based_2nd_exec_ctrl &
- SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+}
+
+static inline bool cpu_has_vmx_flexpriority(void)
+{
+ return cpu_has_vmx_tpr_shadow() &&
+ cpu_has_vmx_virtualize_apic_accesses();
}
static inline int cpu_has_vmx_invept_individual_addr(void)
{
- return (!!(vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT));
+ return !!(vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT);
}
static inline int cpu_has_vmx_invept_context(void)
{
- return (!!(vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT));
+ return !!(vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT);
}
static inline int cpu_has_vmx_invept_global(void)
{
- return (!!(vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT));
+ return !!(vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT);
}
static inline int cpu_has_vmx_ept(void)
{
- return (vmcs_config.cpu_based_2nd_exec_ctrl &
- SECONDARY_EXEC_ENABLE_EPT);
-}
-
-static inline int vm_need_ept(void)
-{
- return (cpu_has_vmx_ept() && enable_ept);
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_ENABLE_EPT;
}
static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
{
- return ((cpu_has_vmx_virtualize_apic_accesses()) &&
- (irqchip_in_kernel(kvm)));
+ return flexpriority_enabled &&
+ (cpu_has_vmx_virtualize_apic_accesses()) &&
+ (irqchip_in_kernel(kvm));
}
static inline int cpu_has_vmx_vpid(void)
{
- return (vmcs_config.cpu_based_2nd_exec_ctrl &
- SECONDARY_EXEC_ENABLE_VPID);
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_ENABLE_VPID;
}
static inline int cpu_has_virtual_nmis(void)
@@ -284,6 +295,11 @@ static inline int cpu_has_virtual_nmis(void)
return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
}
+static inline bool report_flexpriority(void)
+{
+ return flexpriority_enabled;
+}
+
static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
int i;
@@ -381,7 +397,7 @@ static inline void ept_sync_global(void)
|
|
|
|
|
|
static inline void ept_sync_context(u64 eptp)
|
|
|
{
|
|
|
- if (vm_need_ept()) {
|
|
|
+ if (enable_ept) {
|
|
|
if (cpu_has_vmx_invept_context())
|
|
|
__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
|
|
|
else
|
|
@@ -391,7 +407,7 @@ static inline void ept_sync_context(u64 eptp)
|
|
|
|
|
|
static inline void ept_sync_individual_addr(u64 eptp, gpa_t gpa)
|
|
|
{
|
|
|
- if (vm_need_ept()) {
|
|
|
+ if (enable_ept) {
|
|
|
if (cpu_has_vmx_invept_individual_addr())
|
|
|
__invept(VMX_EPT_EXTENT_INDIVIDUAL_ADDR,
|
|
|
eptp, gpa);
|
|
@@ -478,7 +494,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
|
|
|
{
|
|
|
u32 eb;
|
|
|
|
|
|
- eb = (1u << PF_VECTOR) | (1u << UD_VECTOR);
|
|
|
+ eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR);
|
|
|
if (!vcpu->fpu_active)
|
|
|
eb |= 1u << NM_VECTOR;
|
|
|
if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
|
|
@@ -488,9 +504,9 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
|
|
|
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
|
|
|
eb |= 1u << BP_VECTOR;
|
|
|
}
|
|
|
- if (vcpu->arch.rmode.active)
|
|
|
+ if (vcpu->arch.rmode.vm86_active)
|
|
|
eb = ~0;
|
|
|
- if (vm_need_ept())
|
|
|
+ if (enable_ept)
|
|
|
eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
|
|
|
vmcs_write32(EXCEPTION_BITMAP, eb);
|
|
|
}
|
|
@@ -724,29 +740,50 @@ static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
|
|
|
|
|
|
static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
|
|
|
{
|
|
|
- if (vcpu->arch.rmode.active)
|
|
|
+ if (vcpu->arch.rmode.vm86_active)
|
|
|
rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
|
|
|
vmcs_writel(GUEST_RFLAGS, rflags);
|
|
|
}
|
|
|
|
|
|
+static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
|
|
|
+{
|
|
|
+ u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
|
|
|
+ int ret = 0;
|
|
|
+
|
|
|
+ if (interruptibility & GUEST_INTR_STATE_STI)
|
|
|
+ ret |= X86_SHADOW_INT_STI;
|
|
|
+ if (interruptibility & GUEST_INTR_STATE_MOV_SS)
|
|
|
+ ret |= X86_SHADOW_INT_MOV_SS;
|
|
|
+
|
|
|
+ return ret & mask;
|
|
|
+}
|
|
|
+
|
|
|
+static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
|
|
|
+{
|
|
|
+ u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
|
|
|
+ u32 interruptibility = interruptibility_old;
|
|
|
+
|
|
|
+ interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
|
|
|
+
|
|
|
+ if (mask & X86_SHADOW_INT_MOV_SS)
|
|
|
+ interruptibility |= GUEST_INTR_STATE_MOV_SS;
|
|
|
+ if (mask & X86_SHADOW_INT_STI)
|
|
|
+ interruptibility |= GUEST_INTR_STATE_STI;
|
|
|
+
|
|
|
+ if ((interruptibility != interruptibility_old))
|
|
|
+ vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
|
|
|
+}
|
|
|
+
|
|
|
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
|
|
|
{
|
|
|
unsigned long rip;
|
|
|
- u32 interruptibility;
|
|
|
|
|
|
rip = kvm_rip_read(vcpu);
|
|
|
rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
|
|
|
kvm_rip_write(vcpu, rip);
|
|
|
|
|
|
- /*
|
|
|
- * We emulated an instruction, so temporary interrupt blocking
|
|
|
- * should be removed, if set.
|
|
|
- */
|
|
|
- interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
|
|
|
- if (interruptibility & 3)
|
|
|
- vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
|
|
|
- interruptibility & ~3);
|
|
|
- vcpu->arch.interrupt_window_open = 1;
|
|
|
+ /* skipping an emulated instruction also counts */
|
|
|
+ vmx_set_interrupt_shadow(vcpu, 0);
|
|
|
}
|
|
|
|
|
|
static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
|
|
@@ -760,7 +797,7 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
|
|
|
intr_info |= INTR_INFO_DELIVER_CODE_MASK;
|
|
|
}
|
|
|
|
|
|
- if (vcpu->arch.rmode.active) {
|
|
|
+ if (vcpu->arch.rmode.vm86_active) {
|
|
|
vmx->rmode.irq.pending = true;
|
|
|
vmx->rmode.irq.vector = nr;
|
|
|
vmx->rmode.irq.rip = kvm_rip_read(vcpu);
|
|
@@ -773,8 +810,9 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
|
|
|
return;
|
|
|
}
|
|
|
|
|
|
- if (nr == BP_VECTOR || nr == OF_VECTOR) {
|
|
|
- vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
|
|
|
+ if (kvm_exception_is_soft(nr)) {
|
|
|
+ vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
|
|
|
+ vmx->vcpu.arch.event_exit_inst_len);
|
|
|
intr_info |= INTR_TYPE_SOFT_EXCEPTION;
|
|
|
} else
|
|
|
intr_info |= INTR_TYPE_HARD_EXCEPTION;
|
|
@@ -782,11 +820,6 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
|
|
|
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
|
|
|
}
|
|
|
|
|
|
-static bool vmx_exception_injected(struct kvm_vcpu *vcpu)
|
|
|
-{
|
|
|
- return false;
|
|
|
-}
|
|
|
-
|
|
|
/*
|
|
|
* Swap MSR entry in host/guest MSR entry array.
|
|
|
*/
|
|
@@ -812,6 +845,7 @@ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
|
|
|
static void setup_msrs(struct vcpu_vmx *vmx)
|
|
|
{
|
|
|
int save_nmsrs;
|
|
|
+ unsigned long *msr_bitmap;
|
|
|
|
|
|
vmx_load_host_state(vmx);
|
|
|
save_nmsrs = 0;
|
|
@@ -847,6 +881,15 @@ static void setup_msrs(struct vcpu_vmx *vmx)
|
|
|
__find_msr_index(vmx, MSR_KERNEL_GS_BASE);
|
|
|
#endif
|
|
|
vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER);
|
|
|
+
|
|
|
+ if (cpu_has_vmx_msr_bitmap()) {
|
|
|
+ if (is_long_mode(&vmx->vcpu))
|
|
|
+ msr_bitmap = vmx_msr_bitmap_longmode;
|
|
|
+ else
|
|
|
+ msr_bitmap = vmx_msr_bitmap_legacy;
|
|
|
+
|
|
|
+ vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
/*
|
|
@@ -1034,13 +1077,6 @@ static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
-static int vmx_get_irq(struct kvm_vcpu *vcpu)
|
|
|
-{
|
|
|
- if (!vcpu->arch.interrupt.pending)
|
|
|
- return -1;
|
|
|
- return vcpu->arch.interrupt.nr;
|
|
|
-}
|
|
|
-
|
|
|
static __init int cpu_has_kvm_support(void)
|
|
|
{
|
|
|
return cpu_has_vmx();
|
|
@@ -1294,6 +1330,18 @@ static __init int hardware_setup(void)
|
|
|
if (boot_cpu_has(X86_FEATURE_NX))
|
|
|
kvm_enable_efer_bits(EFER_NX);
|
|
|
|
|
|
+ if (!cpu_has_vmx_vpid())
|
|
|
+ enable_vpid = 0;
|
|
|
+
|
|
|
+ if (!cpu_has_vmx_ept())
|
|
|
+ enable_ept = 0;
|
|
|
+
|
|
|
+ if (!cpu_has_vmx_flexpriority())
|
|
|
+ flexpriority_enabled = 0;
|
|
|
+
|
|
|
+ if (!cpu_has_vmx_tpr_shadow())
|
|
|
+ kvm_x86_ops->update_cr8_intercept = NULL;
|
|
|
+
|
|
|
return alloc_kvm_area();
|
|
|
}
|
|
|
|
|
@@ -1324,7 +1372,7 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
|
|
|
vmx->emulation_required = 1;
|
|
|
- vcpu->arch.rmode.active = 0;
|
|
|
+ vcpu->arch.rmode.vm86_active = 0;
|
|
|
|
|
|
vmcs_writel(GUEST_TR_BASE, vcpu->arch.rmode.tr.base);
|
|
|
vmcs_write32(GUEST_TR_LIMIT, vcpu->arch.rmode.tr.limit);
|
|
@@ -1386,7 +1434,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
|
|
|
vmx->emulation_required = 1;
|
|
|
- vcpu->arch.rmode.active = 1;
|
|
|
+ vcpu->arch.rmode.vm86_active = 1;
|
|
|
|
|
|
vcpu->arch.rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
|
|
|
vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));
|
|
@@ -1485,7 +1533,7 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
|
|
|
static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
|
|
|
{
|
|
|
vpid_sync_vcpu_all(to_vmx(vcpu));
|
|
|
- if (vm_need_ept())
|
|
|
+ if (enable_ept)
|
|
|
ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
|
|
|
}
|
|
|
|
|
@@ -1555,10 +1603,10 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
|
|
|
|
|
|
vmx_fpu_deactivate(vcpu);
|
|
|
|
|
|
- if (vcpu->arch.rmode.active && (cr0 & X86_CR0_PE))
|
|
|
+ if (vcpu->arch.rmode.vm86_active && (cr0 & X86_CR0_PE))
|
|
|
enter_pmode(vcpu);
|
|
|
|
|
|
- if (!vcpu->arch.rmode.active && !(cr0 & X86_CR0_PE))
|
|
|
+ if (!vcpu->arch.rmode.vm86_active && !(cr0 & X86_CR0_PE))
|
|
|
enter_rmode(vcpu);
|
|
|
|
|
|
#ifdef CONFIG_X86_64
|
|
@@ -1570,7 +1618,7 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
|
|
|
}
|
|
|
#endif
|
|
|
|
|
|
- if (vm_need_ept())
|
|
|
+ if (enable_ept)
|
|
|
ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
|
|
|
|
|
|
vmcs_writel(CR0_READ_SHADOW, cr0);
|
|
@@ -1599,7 +1647,7 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
|
|
|
u64 eptp;
|
|
|
|
|
|
guest_cr3 = cr3;
|
|
|
- if (vm_need_ept()) {
|
|
|
+ if (enable_ept) {
|
|
|
eptp = construct_eptp(cr3);
|
|
|
vmcs_write64(EPT_POINTER, eptp);
|
|
|
ept_sync_context(eptp);
|
|
@@ -1616,11 +1664,11 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
|
|
|
|
|
|
static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
|
|
|
{
|
|
|
- unsigned long hw_cr4 = cr4 | (vcpu->arch.rmode.active ?
|
|
|
+ unsigned long hw_cr4 = cr4 | (vcpu->arch.rmode.vm86_active ?
|
|
|
KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
|
|
|
|
|
|
vcpu->arch.cr4 = cr4;
|
|
|
- if (vm_need_ept())
|
|
|
+ if (enable_ept)
|
|
|
ept_update_paging_mode_cr4(&hw_cr4, vcpu);
|
|
|
|
|
|
vmcs_writel(CR4_READ_SHADOW, cr4);
|
|
@@ -1699,7 +1747,7 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
|
|
|
struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
|
|
|
u32 ar;
|
|
|
|
|
|
- if (vcpu->arch.rmode.active && seg == VCPU_SREG_TR) {
|
|
|
+ if (vcpu->arch.rmode.vm86_active && seg == VCPU_SREG_TR) {
|
|
|
vcpu->arch.rmode.tr.selector = var->selector;
|
|
|
vcpu->arch.rmode.tr.base = var->base;
|
|
|
vcpu->arch.rmode.tr.limit = var->limit;
|
|
@@ -1709,7 +1757,7 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
|
|
|
vmcs_writel(sf->base, var->base);
|
|
|
vmcs_write32(sf->limit, var->limit);
|
|
|
vmcs_write16(sf->selector, var->selector);
|
|
|
- if (vcpu->arch.rmode.active && var->s) {
|
|
|
+ if (vcpu->arch.rmode.vm86_active && var->s) {
|
|
|
/*
|
|
|
* Hack real-mode segments into vm86 compatibility.
|
|
|
*/
|
|
@@ -1982,7 +2030,7 @@ static int init_rmode_identity_map(struct kvm *kvm)
|
|
|
pfn_t identity_map_pfn;
|
|
|
u32 tmp;
|
|
|
|
|
|
- if (!vm_need_ept())
|
|
|
+ if (!enable_ept)
|
|
|
return 1;
|
|
|
if (unlikely(!kvm->arch.ept_identity_pagetable)) {
|
|
|
printk(KERN_ERR "EPT: identity-mapping pagetable "
|
|
@@ -2071,7 +2119,7 @@ static void allocate_vpid(struct vcpu_vmx *vmx)
|
|
|
int vpid;
|
|
|
|
|
|
vmx->vpid = 0;
|
|
|
- if (!enable_vpid || !cpu_has_vmx_vpid())
|
|
|
+ if (!enable_vpid)
|
|
|
return;
|
|
|
spin_lock(&vmx_vpid_lock);
|
|
|
vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
|
|
@@ -2082,9 +2130,9 @@ static void allocate_vpid(struct vcpu_vmx *vmx)
|
|
|
spin_unlock(&vmx_vpid_lock);
|
|
|
}
|
|
|
|
|
|
-static void vmx_disable_intercept_for_msr(struct page *msr_bitmap, u32 msr)
|
|
|
+static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr)
|
|
|
{
|
|
|
- void *va;
|
|
|
+ int f = sizeof(unsigned long);
|
|
|
|
|
|
if (!cpu_has_vmx_msr_bitmap())
|
|
|
return;
|
|
@@ -2094,16 +2142,21 @@ static void vmx_disable_intercept_for_msr(struct page *msr_bitmap, u32 msr)
|
|
|
* have the write-low and read-high bitmap offsets the wrong way round.
|
|
|
* We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
|
|
|
*/
|
|
|
- va = kmap(msr_bitmap);
|
|
|
if (msr <= 0x1fff) {
|
|
|
- __clear_bit(msr, va + 0x000); /* read-low */
|
|
|
- __clear_bit(msr, va + 0x800); /* write-low */
|
|
|
+ __clear_bit(msr, msr_bitmap + 0x000 / f); /* read-low */
|
|
|
+ __clear_bit(msr, msr_bitmap + 0x800 / f); /* write-low */
|
|
|
} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
|
|
|
msr &= 0x1fff;
|
|
|
- __clear_bit(msr, va + 0x400); /* read-high */
|
|
|
- __clear_bit(msr, va + 0xc00); /* write-high */
|
|
|
+ __clear_bit(msr, msr_bitmap + 0x400 / f); /* read-high */
|
|
|
+ __clear_bit(msr, msr_bitmap + 0xc00 / f); /* write-high */
|
|
|
}
|
|
|
- kunmap(msr_bitmap);
|
|
|
+}
|
|
|
+
|
|
|
+static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
|
|
|
+{
|
|
|
+ if (!longmode_only)
|
|
|
+ __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy, msr);
|
|
|
+ __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode, msr);
|
|
|
}
|
|
|
|
|
|
/*
|
|
@@ -2121,11 +2174,11 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
|
|
|
u32 exec_control;
|
|
|
|
|
|
/* I/O */
|
|
|
- vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
|
|
|
- vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));
|
|
|
+ vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
|
|
|
+ vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b));
|
|
|
|
|
|
if (cpu_has_vmx_msr_bitmap())
|
|
|
- vmcs_write64(MSR_BITMAP, page_to_phys(vmx_msr_bitmap));
|
|
|
+ vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy));
|
|
|
|
|
|
vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
|
|
|
|
|
@@ -2141,7 +2194,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
|
|
|
CPU_BASED_CR8_LOAD_EXITING;
|
|
|
#endif
|
|
|
}
|
|
|
- if (!vm_need_ept())
|
|
|
+ if (!enable_ept)
|
|
|
exec_control |= CPU_BASED_CR3_STORE_EXITING |
|
|
|
CPU_BASED_CR3_LOAD_EXITING |
|
|
|
CPU_BASED_INVLPG_EXITING;
|
|
@@ -2154,7 +2207,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
|
|
|
~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
|
|
|
if (vmx->vpid == 0)
|
|
|
exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
|
|
|
- if (!vm_need_ept())
|
|
|
+ if (!enable_ept)
|
|
|
exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
|
|
|
vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
|
|
|
}
|
|
@@ -2273,7 +2326,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
|
|
|
goto out;
|
|
|
}
|
|
|
|
|
|
- vmx->vcpu.arch.rmode.active = 0;
|
|
|
+ vmx->vcpu.arch.rmode.vm86_active = 0;
|
|
|
|
|
|
vmx->soft_vnmi_blocked = 0;
|
|
|
|
|
@@ -2402,14 +2455,16 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
|
|
|
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
|
|
|
}
|
|
|
|
|
|
-static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
|
|
|
+static void vmx_inject_irq(struct kvm_vcpu *vcpu)
|
|
|
{
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
+ uint32_t intr;
|
|
|
+ int irq = vcpu->arch.interrupt.nr;
|
|
|
|
|
|
KVMTRACE_1D(INJ_VIRQ, vcpu, (u32)irq, handler);
|
|
|
|
|
|
++vcpu->stat.irq_injections;
|
|
|
- if (vcpu->arch.rmode.active) {
|
|
|
+ if (vcpu->arch.rmode.vm86_active) {
|
|
|
vmx->rmode.irq.pending = true;
|
|
|
vmx->rmode.irq.vector = irq;
|
|
|
vmx->rmode.irq.rip = kvm_rip_read(vcpu);
|
|
@@ -2419,8 +2474,14 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
|
|
|
kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
|
|
|
return;
|
|
|
}
|
|
|
- vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
|
|
|
- irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
|
|
|
+ intr = irq | INTR_INFO_VALID_MASK;
|
|
|
+ if (vcpu->arch.interrupt.soft) {
|
|
|
+ intr |= INTR_TYPE_SOFT_INTR;
|
|
|
+ vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
|
|
|
+ vmx->vcpu.arch.event_exit_inst_len);
|
|
|
+ } else
|
|
|
+ intr |= INTR_TYPE_EXT_INTR;
|
|
|
+ vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
|
|
|
}
|
|
|
|
|
|
static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
|
|
@@ -2441,7 +2502,7 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
|
|
|
}
|
|
|
|
|
|
++vcpu->stat.nmi_injections;
|
|
|
- if (vcpu->arch.rmode.active) {
|
|
|
+ if (vcpu->arch.rmode.vm86_active) {
|
|
|
vmx->rmode.irq.pending = true;
|
|
|
vmx->rmode.irq.vector = NMI_VECTOR;
|
|
|
vmx->rmode.irq.rip = kvm_rip_read(vcpu);
|
|
@@ -2456,76 +2517,21 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
|
|
|
INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
|
|
|
}
|
|
|
|
|
|
-static void vmx_update_window_states(struct kvm_vcpu *vcpu)
|
|
|
+static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
|
|
|
{
|
|
|
- u32 guest_intr = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
|
|
|
-
|
|
|
- vcpu->arch.nmi_window_open =
|
|
|
- !(guest_intr & (GUEST_INTR_STATE_STI |
|
|
|
- GUEST_INTR_STATE_MOV_SS |
|
|
|
- GUEST_INTR_STATE_NMI));
|
|
|
if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
|
|
|
- vcpu->arch.nmi_window_open = 0;
|
|
|
-
|
|
|
- vcpu->arch.interrupt_window_open =
|
|
|
- ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
|
|
|
- !(guest_intr & (GUEST_INTR_STATE_STI |
|
|
|
- GUEST_INTR_STATE_MOV_SS)));
|
|
|
-}
|
|
|
-
|
|
|
-static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
|
|
|
-{
|
|
|
- int word_index = __ffs(vcpu->arch.irq_summary);
|
|
|
- int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
|
|
|
- int irq = word_index * BITS_PER_LONG + bit_index;
|
|
|
+ return 0;
|
|
|
|
|
|
- clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
|
|
|
- if (!vcpu->arch.irq_pending[word_index])
|
|
|
- clear_bit(word_index, &vcpu->arch.irq_summary);
|
|
|
- kvm_queue_interrupt(vcpu, irq);
|
|
|
+ return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
|
|
|
+ (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS |
|
|
|
+ GUEST_INTR_STATE_NMI));
|
|
|
}
|
|
|
|
|
|
-static void do_interrupt_requests(struct kvm_vcpu *vcpu,
|
|
|
- struct kvm_run *kvm_run)
|
|
|
+static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
|
|
|
{
|
|
|
- vmx_update_window_states(vcpu);
|
|
|
-
|
|
|
- if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
|
|
|
- vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
|
|
|
- GUEST_INTR_STATE_STI |
|
|
|
- GUEST_INTR_STATE_MOV_SS);
|
|
|
-
|
|
|
- if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) {
|
|
|
- if (vcpu->arch.interrupt.pending) {
|
|
|
- enable_nmi_window(vcpu);
|
|
|
- } else if (vcpu->arch.nmi_window_open) {
|
|
|
- vcpu->arch.nmi_pending = false;
|
|
|
- vcpu->arch.nmi_injected = true;
|
|
|
- } else {
|
|
|
- enable_nmi_window(vcpu);
|
|
|
- return;
|
|
|
- }
|
|
|
- }
|
|
|
- if (vcpu->arch.nmi_injected) {
|
|
|
- vmx_inject_nmi(vcpu);
|
|
|
- if (vcpu->arch.nmi_pending)
|
|
|
- enable_nmi_window(vcpu);
|
|
|
- else if (vcpu->arch.irq_summary
|
|
|
- || kvm_run->request_interrupt_window)
|
|
|
- enable_irq_window(vcpu);
|
|
|
- return;
|
|
|
- }
|
|
|
-
|
|
|
- if (vcpu->arch.interrupt_window_open) {
|
|
|
- if (vcpu->arch.irq_summary && !vcpu->arch.interrupt.pending)
|
|
|
- kvm_do_inject_irq(vcpu);
|
|
|
-
|
|
|
- if (vcpu->arch.interrupt.pending)
|
|
|
- vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);
|
|
|
- }
|
|
|
- if (!vcpu->arch.interrupt_window_open &&
|
|
|
- (vcpu->arch.irq_summary || kvm_run->request_interrupt_window))
|
|
|
- enable_irq_window(vcpu);
|
|
|
+ return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
|
|
|
+ !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
|
|
|
+ (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
|
|
|
}
|
|
|
|
|
|
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
|
|
@@ -2585,6 +2591,31 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
+/*
|
|
|
+ * Trigger machine check on the host. We assume all the MSRs are already set up
|
|
|
+ * by the CPU and that we still run on the same CPU as the MCE occurred on.
|
|
|
+ * We pass a fake environment to the machine check handler because we want
|
|
|
+ * the guest to be always treated like user space, no matter what context
|
|
|
+ * it used internally.
|
|
|
+ */
|
|
|
+static void kvm_machine_check(void)
|
|
|
+{
|
|
|
+#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
|
|
|
+ struct pt_regs regs = {
|
|
|
+ .cs = 3, /* Fake ring 3 no matter what the guest ran on */
|
|
|
+ .flags = X86_EFLAGS_IF,
|
|
|
+ };
|
|
|
+
|
|
|
+ do_machine_check(&regs, 0);
|
|
|
+#endif
|
|
|
+}
|
|
|
+
|
|
|
+static int handle_machine_check(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
|
|
|
+{
|
|
|
+ /* already handled by vcpu_run */
|
|
|
+ return 1;
|
|
|
+}
|
|
|
+
|
|
|
static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
|
|
|
{
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
@@ -2596,17 +2627,14 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
|
|
|
vect_info = vmx->idt_vectoring_info;
|
|
|
intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
|
|
|
|
|
|
+ if (is_machine_check(intr_info))
|
|
|
+ return handle_machine_check(vcpu, kvm_run);
|
|
|
+
|
|
|
if ((vect_info & VECTORING_INFO_VALID_MASK) &&
|
|
|
!is_page_fault(intr_info))
|
|
|
printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
|
|
|
"intr info 0x%x\n", __func__, vect_info, intr_info);
|
|
|
|
|
|
- if (!irqchip_in_kernel(vcpu->kvm) && is_external_interrupt(vect_info)) {
|
|
|
- int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
|
|
|
- set_bit(irq, vcpu->arch.irq_pending);
|
|
|
- set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
|
|
|
- }
|
|
|
-
|
|
|
if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
|
|
|
return 1; /* already handled by vmx_vcpu_run() */
|
|
|
|
|
@@ -2628,17 +2656,17 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
|
|
|
error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
|
|
|
if (is_page_fault(intr_info)) {
|
|
|
/* EPT won't cause page fault directly */
|
|
|
- if (vm_need_ept())
|
|
|
+ if (enable_ept)
|
|
|
BUG();
|
|
|
cr2 = vmcs_readl(EXIT_QUALIFICATION);
|
|
|
KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2,
|
|
|
(u32)((u64)cr2 >> 32), handler);
|
|
|
- if (vcpu->arch.interrupt.pending || vcpu->arch.exception.pending)
|
|
|
+ if (kvm_event_needs_reinjection(vcpu))
|
|
|
kvm_mmu_unprotect_page_virt(vcpu, cr2);
|
|
|
return kvm_mmu_page_fault(vcpu, cr2, error_code);
|
|
|
}
|
|
|
|
|
|
- if (vcpu->arch.rmode.active &&
|
|
|
+ if (vcpu->arch.rmode.vm86_active &&
|
|
|
handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
|
|
|
error_code)) {
|
|
|
if (vcpu->arch.halt_request) {
|
|
@@ -2753,13 +2781,18 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
|
|
|
kvm_set_cr4(vcpu, kvm_register_read(vcpu, reg));
|
|
|
skip_emulated_instruction(vcpu);
|
|
|
return 1;
|
|
|
- case 8:
|
|
|
- kvm_set_cr8(vcpu, kvm_register_read(vcpu, reg));
|
|
|
- skip_emulated_instruction(vcpu);
|
|
|
- if (irqchip_in_kernel(vcpu->kvm))
|
|
|
- return 1;
|
|
|
- kvm_run->exit_reason = KVM_EXIT_SET_TPR;
|
|
|
- return 0;
|
|
|
+ case 8: {
|
|
|
+ u8 cr8_prev = kvm_get_cr8(vcpu);
|
|
|
+ u8 cr8 = kvm_register_read(vcpu, reg);
|
|
|
+ kvm_set_cr8(vcpu, cr8);
|
|
|
+ skip_emulated_instruction(vcpu);
|
|
|
+ if (irqchip_in_kernel(vcpu->kvm))
|
|
|
+ return 1;
|
|
|
+ if (cr8_prev <= cr8)
|
|
|
+ return 1;
|
|
|
+ kvm_run->exit_reason = KVM_EXIT_SET_TPR;
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
};
|
|
|
break;
|
|
|
case 2: /* clts */
|
|
@@ -2957,8 +2990,9 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu,
|
|
|
* If the user space waits to inject interrupts, exit as soon as
|
|
|
* possible
|
|
|
*/
|
|
|
- if (kvm_run->request_interrupt_window &&
|
|
|
- !vcpu->arch.irq_summary) {
|
|
|
+ if (!irqchip_in_kernel(vcpu->kvm) &&
|
|
|
+ kvm_run->request_interrupt_window &&
|
|
|
+ !kvm_cpu_has_interrupt(vcpu)) {
|
|
|
kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
|
|
|
return 0;
|
|
|
}
|
|
@@ -2980,7 +3014,7 @@ static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
|
|
|
|
|
|
static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
|
|
|
{
|
|
|
- u64 exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
|
|
|
+ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
|
|
|
|
|
|
kvm_mmu_invlpg(vcpu, exit_qualification);
|
|
|
skip_emulated_instruction(vcpu);
|
|
@@ -2996,11 +3030,11 @@ static int handle_wbinvd(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
|
|
|
|
|
|
static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
|
|
|
{
|
|
|
- u64 exit_qualification;
|
|
|
+ unsigned long exit_qualification;
|
|
|
enum emulation_result er;
|
|
|
unsigned long offset;
|
|
|
|
|
|
- exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
|
|
|
+ exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
|
|
|
offset = exit_qualification & 0xffful;
|
|
|
|
|
|
er = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
|
|
@@ -3019,22 +3053,41 @@ static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
unsigned long exit_qualification;
|
|
|
u16 tss_selector;
|
|
|
- int reason;
|
|
|
+ int reason, type, idt_v;
|
|
|
+
|
|
|
+ idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
|
|
|
+ type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
|
|
|
|
|
|
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
|
|
|
|
|
|
reason = (u32)exit_qualification >> 30;
|
|
|
- if (reason == TASK_SWITCH_GATE && vmx->vcpu.arch.nmi_injected &&
|
|
|
- (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
|
|
|
- (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK)
|
|
|
- == INTR_TYPE_NMI_INTR) {
|
|
|
- vcpu->arch.nmi_injected = false;
|
|
|
- if (cpu_has_virtual_nmis())
|
|
|
- vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
|
|
|
- GUEST_INTR_STATE_NMI);
|
|
|
+ if (reason == TASK_SWITCH_GATE && idt_v) {
|
|
|
+ switch (type) {
|
|
|
+ case INTR_TYPE_NMI_INTR:
|
|
|
+ vcpu->arch.nmi_injected = false;
|
|
|
+ if (cpu_has_virtual_nmis())
|
|
|
+ vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
|
|
|
+ GUEST_INTR_STATE_NMI);
|
|
|
+ break;
|
|
|
+ case INTR_TYPE_EXT_INTR:
|
|
|
+ case INTR_TYPE_SOFT_INTR:
|
|
|
+ kvm_clear_interrupt_queue(vcpu);
|
|
|
+ break;
|
|
|
+ case INTR_TYPE_HARD_EXCEPTION:
|
|
|
+ case INTR_TYPE_SOFT_EXCEPTION:
|
|
|
+ kvm_clear_exception_queue(vcpu);
|
|
|
+ break;
|
|
|
+ default:
|
|
|
+ break;
|
|
|
+ }
|
|
|
}
|
|
|
tss_selector = exit_qualification;
|
|
|
|
|
|
+ if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
|
|
|
+ type != INTR_TYPE_EXT_INTR &&
|
|
|
+ type != INTR_TYPE_NMI_INTR))
|
|
|
+ skip_emulated_instruction(vcpu);
|
|
|
+
|
|
|
if (!kvm_task_switch(vcpu, tss_selector, reason))
|
|
|
return 0;
|
|
|
|
|
@@ -3051,11 +3104,11 @@ static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
|
|
|
|
|
|
static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
|
|
|
{
|
|
|
- u64 exit_qualification;
|
|
|
+ unsigned long exit_qualification;
|
|
|
gpa_t gpa;
|
|
|
int gla_validity;
|
|
|
|
|
|
- exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
|
|
|
+ exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
|
|
|
|
|
|
if (exit_qualification & (1 << 6)) {
|
|
|
printk(KERN_ERR "EPT: GPA exceeds GAW!\n");
|
|
@@ -3067,7 +3120,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
|
|
|
printk(KERN_ERR "EPT: Handling EPT violation failed!\n");
|
|
|
printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n",
|
|
|
(long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS),
|
|
|
- (long unsigned int)vmcs_read64(GUEST_LINEAR_ADDRESS));
|
|
|
+ vmcs_readl(GUEST_LINEAR_ADDRESS));
|
|
|
printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n",
|
|
|
(long unsigned int)exit_qualification);
|
|
|
kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
|
|
@@ -3150,6 +3203,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
|
|
|
[EXIT_REASON_WBINVD] = handle_wbinvd,
|
|
|
[EXIT_REASON_TASK_SWITCH] = handle_task_switch,
|
|
|
[EXIT_REASON_EPT_VIOLATION] = handle_ept_violation,
|
|
|
+ [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check,
|
|
|
};
|
|
|
|
|
|
static const int kvm_vmx_max_exit_handlers =
|
|
@@ -3159,10 +3213,10 @@ static const int kvm_vmx_max_exit_handlers =
|
|
|
* The guest has exited. See if we can fix it or if we need userspace
|
|
|
* assistance.
|
|
|
*/
|
|
|
-static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
|
|
|
+static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
|
|
|
{
|
|
|
- u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
+ u32 exit_reason = vmx->exit_reason;
|
|
|
u32 vectoring_info = vmx->idt_vectoring_info;
|
|
|
|
|
|
KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)kvm_rip_read(vcpu),
|
|
@@ -3178,7 +3232,7 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
|
|
|
|
|
|
/* Access CR3 don't cause VMExit in paging mode, so we need
|
|
|
* to sync with guest real CR3. */
|
|
|
- if (vm_need_ept() && is_paging(vcpu)) {
|
|
|
+ if (enable_ept && is_paging(vcpu)) {
|
|
|
vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
|
|
|
ept_load_pdptrs(vcpu);
|
|
|
}
|
|
@@ -3199,9 +3253,8 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
|
|
|
__func__, vectoring_info, exit_reason);
|
|
|
|
|
|
if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) {
|
|
|
- if (vcpu->arch.interrupt_window_open) {
|
|
|
+ if (vmx_interrupt_allowed(vcpu)) {
|
|
|
vmx->soft_vnmi_blocked = 0;
|
|
|
- vcpu->arch.nmi_window_open = 1;
|
|
|
} else if (vmx->vnmi_blocked_time > 1000000000LL &&
|
|
|
vcpu->arch.nmi_pending) {
|
|
|
/*
|
|
@@ -3214,7 +3267,6 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
|
|
|
"state on VCPU %d after 1 s timeout\n",
|
|
|
__func__, vcpu->vcpu_id);
|
|
|
vmx->soft_vnmi_blocked = 0;
|
|
|
- vmx->vcpu.arch.nmi_window_open = 1;
|
|
|
}
|
|
|
}
|
|
|
|
|
@@ -3228,122 +3280,107 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
-static void update_tpr_threshold(struct kvm_vcpu *vcpu)
|
|
|
+static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
|
|
|
{
|
|
|
- int max_irr, tpr;
|
|
|
-
|
|
|
- if (!vm_need_tpr_shadow(vcpu->kvm))
|
|
|
- return;
|
|
|
-
|
|
|
- if (!kvm_lapic_enabled(vcpu) ||
|
|
|
- ((max_irr = kvm_lapic_find_highest_irr(vcpu)) == -1)) {
|
|
|
+ if (irr == -1 || tpr < irr) {
|
|
|
vmcs_write32(TPR_THRESHOLD, 0);
|
|
|
return;
|
|
|
}
|
|
|
|
|
|
- tpr = (kvm_lapic_get_cr8(vcpu) & 0x0f) << 4;
|
|
|
- vmcs_write32(TPR_THRESHOLD, (max_irr > tpr) ? tpr >> 4 : max_irr >> 4);
|
|
|
+ vmcs_write32(TPR_THRESHOLD, irr);
|
|
|
}
|
|
|
|
|
|
static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
|
|
|
{
|
|
|
u32 exit_intr_info;
|
|
|
- u32 idt_vectoring_info;
|
|
|
+ u32 idt_vectoring_info = vmx->idt_vectoring_info;
|
|
|
bool unblock_nmi;
|
|
|
u8 vector;
|
|
|
int type;
|
|
|
bool idtv_info_valid;
|
|
|
- u32 error;
|
|
|
|
|
|
exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
|
|
|
+
|
|
|
+ vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
|
|
|
+
|
|
|
+ /* Handle machine checks before interrupts are enabled */
|
|
|
+ if ((vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY)
|
|
|
+ || (vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI
|
|
|
+ && is_machine_check(exit_intr_info)))
|
|
|
+ kvm_machine_check();
|
|
|
+
|
|
|
+ /* We need to handle NMIs before interrupts are enabled */
|
|
|
+ if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
|
|
|
+ (exit_intr_info & INTR_INFO_VALID_MASK)) {
|
|
|
+ KVMTRACE_0D(NMI, &vmx->vcpu, handler);
|
|
|
+ asm("int $2");
|
|
|
+ }
|
|
|
+
|
|
|
+ idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
|
|
|
+
|
|
|
if (cpu_has_virtual_nmis()) {
|
|
|
unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
|
|
|
vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
|
|
|
/*
|
|
|
- * SDM 3: 25.7.1.2
|
|
|
+ * SDM 3: 27.7.1.2 (September 2008)
|
|
|
* Re-set bit "block by NMI" before VM entry if vmexit caused by
|
|
|
* a guest IRET fault.
|
|
|
+ * SDM 3: 23.2.2 (September 2008)
|
|
|
+ * Bit 12 is undefined in any of the following cases:
|
|
|
+ * If the VM exit sets the valid bit in the IDT-vectoring
|
|
|
+ * information field.
|
|
|
+ * If the VM exit is due to a double fault.
|
|
|
*/
|
|
|
- if (unblock_nmi && vector != DF_VECTOR)
|
|
|
+ if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
|
|
|
+ vector != DF_VECTOR && !idtv_info_valid)
|
|
|
vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
|
|
|
GUEST_INTR_STATE_NMI);
|
|
|
} else if (unlikely(vmx->soft_vnmi_blocked))
|
|
|
vmx->vnmi_blocked_time +=
|
|
|
ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time));
|
|
|
|
|
|
- idt_vectoring_info = vmx->idt_vectoring_info;
|
|
|
- idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
|
|
|
+ vmx->vcpu.arch.nmi_injected = false;
|
|
|
+ kvm_clear_exception_queue(&vmx->vcpu);
|
|
|
+ kvm_clear_interrupt_queue(&vmx->vcpu);
|
|
|
+
|
|
|
+ if (!idtv_info_valid)
|
|
|
+ return;
|
|
|
+
|
|
|
vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
|
|
|
type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
|
|
|
- if (vmx->vcpu.arch.nmi_injected) {
|
|
|
+
|
|
|
+ switch (type) {
|
|
|
+ case INTR_TYPE_NMI_INTR:
|
|
|
+ vmx->vcpu.arch.nmi_injected = true;
|
|
|
/*
|
|
|
- * SDM 3: 25.7.1.2
|
|
|
- * Clear bit "block by NMI" before VM entry if a NMI delivery
|
|
|
- * faulted.
|
|
|
+ * SDM 3: 27.7.1.2 (September 2008)
|
|
|
+ * Clear bit "block by NMI" before VM entry if a NMI
|
|
|
+ * delivery faulted.
|
|
|
*/
|
|
|
- if (idtv_info_valid && type == INTR_TYPE_NMI_INTR)
|
|
|
- vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
|
|
|
- GUEST_INTR_STATE_NMI);
|
|
|
- else
|
|
|
- vmx->vcpu.arch.nmi_injected = false;
|
|
|
- }
|
|
|
- kvm_clear_exception_queue(&vmx->vcpu);
|
|
|
- if (idtv_info_valid && (type == INTR_TYPE_HARD_EXCEPTION ||
|
|
|
- type == INTR_TYPE_SOFT_EXCEPTION)) {
|
|
|
+ vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
|
|
|
+ GUEST_INTR_STATE_NMI);
|
|
|
+ break;
|
|
|
+ case INTR_TYPE_SOFT_EXCEPTION:
|
|
|
+ vmx->vcpu.arch.event_exit_inst_len =
|
|
|
+ vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
|
|
|
+ /* fall through */
|
|
|
+ case INTR_TYPE_HARD_EXCEPTION:
|
|
|
if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
|
|
|
- error = vmcs_read32(IDT_VECTORING_ERROR_CODE);
|
|
|
- kvm_queue_exception_e(&vmx->vcpu, vector, error);
|
|
|
+ u32 err = vmcs_read32(IDT_VECTORING_ERROR_CODE);
|
|
|
+ kvm_queue_exception_e(&vmx->vcpu, vector, err);
|
|
|
} else
|
|
|
kvm_queue_exception(&vmx->vcpu, vector);
|
|
|
- vmx->idt_vectoring_info = 0;
|
|
|
- }
|
|
|
- kvm_clear_interrupt_queue(&vmx->vcpu);
|
|
|
- if (idtv_info_valid && type == INTR_TYPE_EXT_INTR) {
|
|
|
- kvm_queue_interrupt(&vmx->vcpu, vector);
|
|
|
- vmx->idt_vectoring_info = 0;
|
|
|
- }
|
|
|
-}
|
|
|
-
|
|
|
-static void vmx_intr_assist(struct kvm_vcpu *vcpu)
|
|
|
-{
|
|
|
- update_tpr_threshold(vcpu);
|
|
|
-
|
|
|
- vmx_update_window_states(vcpu);
|
|
|
-
|
|
|
- if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
|
|
|
- vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
|
|
|
- GUEST_INTR_STATE_STI |
|
|
|
- GUEST_INTR_STATE_MOV_SS);
|
|
|
-
|
|
|
- if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) {
|
|
|
- if (vcpu->arch.interrupt.pending) {
|
|
|
- enable_nmi_window(vcpu);
|
|
|
- } else if (vcpu->arch.nmi_window_open) {
|
|
|
- vcpu->arch.nmi_pending = false;
|
|
|
- vcpu->arch.nmi_injected = true;
|
|
|
- } else {
|
|
|
- enable_nmi_window(vcpu);
|
|
|
- return;
|
|
|
- }
|
|
|
- }
|
|
|
- if (vcpu->arch.nmi_injected) {
|
|
|
- vmx_inject_nmi(vcpu);
|
|
|
- if (vcpu->arch.nmi_pending)
|
|
|
- enable_nmi_window(vcpu);
|
|
|
- else if (kvm_cpu_has_interrupt(vcpu))
|
|
|
- enable_irq_window(vcpu);
|
|
|
- return;
|
|
|
- }
|
|
|
- if (!vcpu->arch.interrupt.pending && kvm_cpu_has_interrupt(vcpu)) {
|
|
|
- if (vcpu->arch.interrupt_window_open)
|
|
|
- kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
|
|
|
- else
|
|
|
- enable_irq_window(vcpu);
|
|
|
- }
|
|
|
- if (vcpu->arch.interrupt.pending) {
|
|
|
- vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);
|
|
|
- if (kvm_cpu_has_interrupt(vcpu))
|
|
|
- enable_irq_window(vcpu);
|
|
|
+ break;
|
|
|
+ case INTR_TYPE_SOFT_INTR:
|
|
|
+ vmx->vcpu.arch.event_exit_inst_len =
|
|
|
+ vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
|
|
|
+ /* fall through */
|
|
|
+ case INTR_TYPE_EXT_INTR:
|
|
|
+ kvm_queue_interrupt(&vmx->vcpu, vector,
|
|
|
+ type == INTR_TYPE_SOFT_INTR);
|
|
|
+ break;
|
|
|
+ default:
|
|
|
+ break;
|
|
|
}
|
|
|
}
|
|
|
|
|
@@ -3381,7 +3418,6 @@ static void fixup_rmode_irq(struct vcpu_vmx *vmx)
|
|
|
static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
|
|
|
{
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
- u32 intr_info;
|
|
|
|
|
|
/* Record the guest's net vcpu time for enforced NMI injections. */
|
|
|
if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
|
|
@@ -3505,20 +3541,9 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
|
|
|
if (vmx->rmode.irq.pending)
|
|
|
fixup_rmode_irq(vmx);
|
|
|
|
|
|
- vmx_update_window_states(vcpu);
|
|
|
-
|
|
|
asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
|
|
|
vmx->launched = 1;
|
|
|
|
|
|
- intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
|
|
|
-
|
|
|
- /* We need to handle NMIs before interrupts are enabled */
|
|
|
- if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
|
|
|
- (intr_info & INTR_INFO_VALID_MASK)) {
|
|
|
- KVMTRACE_0D(NMI, vcpu, handler);
|
|
|
- asm("int $2");
|
|
|
- }
|
|
|
-
|
|
|
vmx_complete_interrupts(vmx);
|
|
|
}
|
|
|
|
|
@@ -3593,7 +3618,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
|
|
|
if (alloc_apic_access_page(kvm) != 0)
|
|
|
goto free_vmcs;
|
|
|
|
|
|
- if (vm_need_ept())
|
|
|
+ if (enable_ept)
|
|
|
if (alloc_identity_pagetable(kvm) != 0)
|
|
|
goto free_vmcs;
|
|
|
|
|
@@ -3631,9 +3656,32 @@ static int get_ept_level(void)
|
|
|
return VMX_EPT_DEFAULT_GAW + 1;
|
|
|
}
|
|
|
|
|
|
-static int vmx_get_mt_mask_shift(void)
|
|
|
+static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
|
|
|
{
|
|
|
- return VMX_EPT_MT_EPTE_SHIFT;
|
|
|
+ u64 ret;
|
|
|
+
|
|
|
+ /* For VT-d and EPT combination
|
|
|
+ * 1. MMIO: always map as UC
|
|
|
+ * 2. EPT with VT-d:
|
|
|
+ * a. VT-d without snooping control feature: can't guarantee the
|
|
|
+ * result, try to trust guest.
|
|
|
+ * b. VT-d with snooping control feature: snooping control feature of
|
|
|
+ * VT-d engine can guarantee the cache correctness. Just set it
|
|
|
+ * to WB to keep consistent with host. So the same as item 3.
|
|
|
+ * 3. EPT without VT-d: always map as WB and set IGMT=1 to keep
|
|
|
+ * consistent with host MTRR
|
|
|
+ */
|
|
|
+ if (is_mmio)
|
|
|
+ ret = MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
|
|
|
+ else if (vcpu->kvm->arch.iommu_domain &&
|
|
|
+ !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY))
|
|
|
+ ret = kvm_get_guest_memory_type(vcpu, gfn) <<
|
|
|
+ VMX_EPT_MT_EPTE_SHIFT;
|
|
|
+ else
|
|
|
+ ret = (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT)
|
|
|
+ | VMX_EPT_IGMT_BIT;
|
|
|
+
|
|
|
+ return ret;
|
|
|
}
|
|
|
|
|
|
static struct kvm_x86_ops vmx_x86_ops = {
|
|
@@ -3644,7 +3692,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
|
|
|
.check_processor_compatibility = vmx_check_processor_compat,
|
|
|
.hardware_enable = hardware_enable,
|
|
|
.hardware_disable = hardware_disable,
|
|
|
- .cpu_has_accelerated_tpr = cpu_has_vmx_virtualize_apic_accesses,
|
|
|
+ .cpu_has_accelerated_tpr = report_flexpriority,
|
|
|
|
|
|
.vcpu_create = vmx_create_vcpu,
|
|
|
.vcpu_free = vmx_free_vcpu,
|
|
@@ -3678,78 +3726,82 @@ static struct kvm_x86_ops vmx_x86_ops = {
|
|
|
.tlb_flush = vmx_flush_tlb,
|
|
|
|
|
|
.run = vmx_vcpu_run,
|
|
|
- .handle_exit = kvm_handle_exit,
|
|
|
+ .handle_exit = vmx_handle_exit,
|
|
|
.skip_emulated_instruction = skip_emulated_instruction,
|
|
|
+ .set_interrupt_shadow = vmx_set_interrupt_shadow,
|
|
|
+ .get_interrupt_shadow = vmx_get_interrupt_shadow,
|
|
|
.patch_hypercall = vmx_patch_hypercall,
|
|
|
- .get_irq = vmx_get_irq,
|
|
|
.set_irq = vmx_inject_irq,
|
|
|
+ .set_nmi = vmx_inject_nmi,
|
|
|
.queue_exception = vmx_queue_exception,
|
|
|
- .exception_injected = vmx_exception_injected,
|
|
|
- .inject_pending_irq = vmx_intr_assist,
|
|
|
- .inject_pending_vectors = do_interrupt_requests,
|
|
|
+ .interrupt_allowed = vmx_interrupt_allowed,
|
|
|
+ .nmi_allowed = vmx_nmi_allowed,
|
|
|
+ .enable_nmi_window = enable_nmi_window,
|
|
|
+ .enable_irq_window = enable_irq_window,
|
|
|
+ .update_cr8_intercept = update_cr8_intercept,
|
|
|
|
|
|
.set_tss_addr = vmx_set_tss_addr,
|
|
|
.get_tdp_level = get_ept_level,
|
|
|
- .get_mt_mask_shift = vmx_get_mt_mask_shift,
|
|
|
+ .get_mt_mask = vmx_get_mt_mask,
|
|
|
};
|
|
|
|
|
|
static int __init vmx_init(void)
|
|
|
{
|
|
|
- void *va;
|
|
|
int r;
|
|
|
|
|
|
- vmx_io_bitmap_a = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
|
|
|
+ vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
|
|
|
if (!vmx_io_bitmap_a)
|
|
|
return -ENOMEM;
|
|
|
|
|
|
- vmx_io_bitmap_b = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
|
|
|
+ vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
|
|
|
if (!vmx_io_bitmap_b) {
|
|
|
r = -ENOMEM;
|
|
|
goto out;
|
|
|
}
|
|
|
|
|
|
- vmx_msr_bitmap = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
|
|
|
- if (!vmx_msr_bitmap) {
|
|
|
+ vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
|
|
|
+ if (!vmx_msr_bitmap_legacy) {
|
|
|
r = -ENOMEM;
|
|
|
goto out1;
|
|
|
}
|
|
|
|
|
|
+ vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
|
|
|
+ if (!vmx_msr_bitmap_longmode) {
|
|
|
+ r = -ENOMEM;
|
|
|
+ goto out2;
|
|
|
+ }
|
|
|
+
|
|
|
/*
|
|
|
* Allow direct access to the PC debug port (it is often used for I/O
|
|
|
* delays, but the vmexits simply slow things down).
|
|
|
*/
|
|
|
- va = kmap(vmx_io_bitmap_a);
|
|
|
- memset(va, 0xff, PAGE_SIZE);
|
|
|
- clear_bit(0x80, va);
|
|
|
- kunmap(vmx_io_bitmap_a);
|
|
|
+ memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
|
|
|
+ clear_bit(0x80, vmx_io_bitmap_a);
|
|
|
|
|
|
- va = kmap(vmx_io_bitmap_b);
|
|
|
- memset(va, 0xff, PAGE_SIZE);
|
|
|
- kunmap(vmx_io_bitmap_b);
|
|
|
+ memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
|
|
|
|
|
|
- va = kmap(vmx_msr_bitmap);
|
|
|
- memset(va, 0xff, PAGE_SIZE);
|
|
|
- kunmap(vmx_msr_bitmap);
|
|
|
+ memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
|
|
|
+ memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
|
|
|
|
|
|
set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
|
|
|
|
|
|
r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
|
|
|
if (r)
|
|
|
- goto out2;
|
|
|
+ goto out3;
|
|
|
|
|
|
- vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_FS_BASE);
|
|
|
- vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_GS_BASE);
|
|
|
- vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_CS);
|
|
|
- vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_ESP);
|
|
|
- vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_EIP);
|
|
|
+ vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
|
|
|
+ vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
|
|
|
+ vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
|
|
|
+ vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
|
|
|
+ vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
|
|
|
+ vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
|
|
|
|
|
|
- if (vm_need_ept()) {
|
|
|
+ if (enable_ept) {
|
|
|
bypass_guest_pf = 0;
|
|
|
kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
|
|
|
VMX_EPT_WRITABLE_MASK);
|
|
|
kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
|
|
|
- VMX_EPT_EXECUTABLE_MASK,
|
|
|
- VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
|
|
|
+ VMX_EPT_EXECUTABLE_MASK);
|
|
|
kvm_enable_tdp();
|
|
|
} else
|
|
|
kvm_disable_tdp();
|
|
@@ -3761,20 +3813,23 @@ static int __init vmx_init(void)
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
+out3:
|
|
|
+ free_page((unsigned long)vmx_msr_bitmap_longmode);
|
|
|
out2:
|
|
|
- __free_page(vmx_msr_bitmap);
|
|
|
+ free_page((unsigned long)vmx_msr_bitmap_legacy);
|
|
|
out1:
|
|
|
- __free_page(vmx_io_bitmap_b);
|
|
|
+ free_page((unsigned long)vmx_io_bitmap_b);
|
|
|
out:
|
|
|
- __free_page(vmx_io_bitmap_a);
|
|
|
+ free_page((unsigned long)vmx_io_bitmap_a);
|
|
|
return r;
|
|
|
}
|
|
|
|
|
|
static void __exit vmx_exit(void)
|
|
|
{
|
|
|
- __free_page(vmx_msr_bitmap);
|
|
|
- __free_page(vmx_io_bitmap_b);
|
|
|
- __free_page(vmx_io_bitmap_a);
|
|
|
+ free_page((unsigned long)vmx_msr_bitmap_legacy);
|
|
|
+ free_page((unsigned long)vmx_msr_bitmap_longmode);
|
|
|
+ free_page((unsigned long)vmx_io_bitmap_b);
|
|
|
+ free_page((unsigned long)vmx_io_bitmap_a);
|
|
|
|
|
|
kvm_exit();
|
|
|
}
|