@@ -1813,6 +1813,13 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
 			return;
 		}
 		break;
+	case MSR_IA32_PEBS_ENABLE:
+		/* PEBS needs a quiescent period after being disabled (to write
+		 * a record). Disabling PEBS through VMX MSR swapping doesn't
+		 * provide that period, so a CPU could write host's record into
+		 * guest's memory.
+		 */
+		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
 	}
 
 	for (i = 0; i < m->nr; ++i)
@@ -1850,26 +1857,31 @@ static void reload_tss(void)
 
 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
 {
-	u64 guest_efer;
-	u64 ignore_bits;
+	u64 guest_efer = vmx->vcpu.arch.efer;
+	u64 ignore_bits = 0;
 
-	guest_efer = vmx->vcpu.arch.efer;
+	if (!enable_ept) {
+		/*
+		 * NX is needed to handle CR0.WP=1, CR4.SMEP=1. Testing
+		 * host CPUID is more efficient than testing guest CPUID
+		 * or CR4. Host SMEP is anyway a requirement for guest SMEP.
+		 */
+		if (boot_cpu_has(X86_FEATURE_SMEP))
+			guest_efer |= EFER_NX;
+		else if (!(guest_efer & EFER_NX))
+			ignore_bits |= EFER_NX;
+	}
 
 	/*
-	 * NX is emulated; LMA and LME handled by hardware; SCE meaningless
-	 * outside long mode
+	 * LMA and LME handled by hardware; SCE meaningless outside long mode.
 	 */
-	ignore_bits = EFER_NX | EFER_SCE;
+	ignore_bits |= EFER_SCE;
 #ifdef CONFIG_X86_64
 	ignore_bits |= EFER_LMA | EFER_LME;
 	/* SCE is meaningful only in long mode on Intel */
 	if (guest_efer & EFER_LMA)
 		ignore_bits &= ~(u64)EFER_SCE;
 #endif
-	guest_efer &= ~ignore_bits;
-	guest_efer |= host_efer & ignore_bits;
-	vmx->guest_msrs[efer_offset].data = guest_efer;
-	vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
 
 	clear_atomic_switch_msr(vmx, MSR_EFER);
 
@@ -1880,16 +1892,21 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
 	 */
 	if (cpu_has_load_ia32_efer ||
 	    (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
-		guest_efer = vmx->vcpu.arch.efer;
 		if (!(guest_efer & EFER_LMA))
 			guest_efer &= ~EFER_LME;
 		if (guest_efer != host_efer)
 			add_atomic_switch_msr(vmx, MSR_EFER,
 					      guest_efer, host_efer);
 		return false;
-	}
+	} else {
+		guest_efer &= ~ignore_bits;
+		guest_efer |= host_efer & ignore_bits;
 
-	return true;
+		vmx->guest_msrs[efer_offset].data = guest_efer;
+		vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
+
+		return true;
+	}
 }
 
 static unsigned long segment_base(u16 selector)